Import chromium-110.0.5481.100

importer 2023-02-19 12:34:07 +08:00 committed by klzgrad
commit 253361980e
12400 changed files with 3072676 additions and 0 deletions

43
src/.clang-format Normal file

@ -0,0 +1,43 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11
# TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
# the Chromium style (is implied by BasedOnStyle: Chromium).
InsertBraces: true
# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"
# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']

58
src/.gitattributes vendored Normal file

@ -0,0 +1,58 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf
# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf
# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.rs text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.toml text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ATL_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf
# Skip Tricium by default on files in third_party.
third_party/** -tricium

172
src/.gn Normal file

@ -0,0 +1,172 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.
import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
# TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
# a warning that "Build argument has no effect". When adding a v8 variable, it
# also needs to be added to src/ios/BUILD.gn (and removed from both locations
# when it is removed).
v8_enable_gdbjit = false
v8_imminent_deprecation_warnings = false
# Don't include webrtc's builtin task queue implementation.
rtc_link_task_queue_impl = false
# Don't include the iLBC audio codec.
# TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
# deps on codecs, we can remove this.
rtc_include_ilbc = false
# Changes some setup for the Crashpad build to set them to build against
# Chromium's zlib, base, etc.
crashpad_dependencies = "chromium"
# Override ANGLE's Vulkan dependencies.
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
angle_vulkan_validation_layers_dir =
"//third_party/vulkan-deps/vulkan-validation-layers/src"
# Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 10
devtools_visibility = [ "*" ]
}
# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
# crbug.com/1158989
"//headless:headless_renderer", # 12 errors
"//headless:headless_shared_sources", # 2 errors
# //v8, https://crbug.com/v8/7330
"//v8/src/inspector:inspector", # 20 errors
"//v8/test/cctest:cctest_sources", # 15 errors
"//v8/test/unittests:inspector_unittests_sources", # 2 errors
"//v8:cppgc_base", # 1 error
"//v8:v8_internal_headers", # 11 errors
"//v8:v8_libplatform", # 2 errors
# After making partition_alloc a standalone library, remove partition_alloc
# target from the skip list, because partition_alloc will depend on its own
# base.
# partition alloc standalone library bug is https://crbug.com/1151236.
"//base/allocator/partition_allocator:partition_alloc", # 292 errors
]
# This is the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
# enable a different mode. Instead, use GN build args to enable or disable
# functionality and set options. An example is checking for a file in the
# src-internal repo to see if the corresponding src-internal feature should
# be enabled. There are several things that can go wrong with this:
#
# - It's mysterious what causes some things to happen. Although in many cases
# such behavior can be conveniently automatic, GN optimizes for explicit
# and obvious behavior so people can more easily diagnose problems.
#
# - The user can't enable a mode for one build and not another. With GN build
# args, the user can choose the exact configuration of multiple builds
# using one checkout. But when flags are implicitly based on the state of the
# checkout, this functionality is broken.
#
# - It's easy to get stale files. If for example the user edits the gclient
# to stop checking out src-internal (or any other optional thing), it's
# easy to end up with stale files still mysteriously triggering build
# conditions that are no longer appropriate (yes, this happens in real
# life).
#
# - Do not use a script to iterate files in a directory (glob):
#
# - This has the same "stale file" problem as the above discussion. Various
# operations can leave untracked files in the source tree which can cause
# surprising effects.
#
# - It becomes impossible to use "git grep" to find where a certain file is
# referenced. This operation is very common and people really do get
# confused when things aren't listed.
#
# - It's easy to screw up. One common case is a build-time script that packs
# up a directory. The author notices that the script isn't re-run when the
# directory is updated, so adds a glob so all the files are listed as
# inputs. This seems to work great... until a file is deleted. When a
# file is deleted, all the inputs the glob lists will still be up to date
# and no command-lines will have been changed. The action will not be
# re-run and the build will be broken. It is possible to get this correct
# using glob, and it's possible to mess it up without glob, but globs make
# this situation much easier to create. If the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.
"//build_overrides/build.gni",
"//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
"//chrome/version.gni",
# TODO(dgn): Layer violation but breaks the build otherwise, see
# https://crbug.com/474506.
"//clank/java/BUILD.gn",
"//clank/native/BUILD.gn",
"//google_apis/BUILD.gn",
"//printing/BUILD.gn",
"//remoting/host/installer/linux/BUILD.gn",
"//remoting/remoting_version.gni",
"//remoting/host/installer/win/generate_clsids.gni",
"//tools/grit/grit_rule.gni",
"//tools/gritsettings/BUILD.gn",
]

1486
src/AUTHORS Normal file

File diff suppressed because it is too large.

1789
src/BUILD.gn Normal file

File diff suppressed because it is too large.

4959
src/DEPS Normal file

File diff suppressed because it is too large.

27
src/LICENSE Normal file

@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4849
src/base/BUILD.gn Normal file

File diff suppressed because it is too large.

37
src/base/DEPS Normal file

@ -0,0 +1,37 @@
include_rules = [
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",
"+third_party/perfetto/include",
"+third_party/perfetto/protos/perfetto",
# Conversions between base and Rust types (e.g. base::span <-> rust::Slice)
# require the cxx.h header from cxx. This is only used if Rust is enabled
# in the gn build; see //base/BUILD.gn's conditional dependency on
# //build/rust:cxx_cppdeps.
"+third_party/rust/cxx",
"+third_party/test_fonts",
# JSON Deserialization.
"+third_party/rust/serde_json_lenient/v0_1/wrapper",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",
"-url",
# ICU dependencies must be separate from the rest of base.
"-i18n",
# //base/util can use //base but not vice versa.
"-util",
]
specific_include_rules = {
# Special case
"process/current_process(|_test)\.h": [
"+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
],
}

3
src/base/DIR_METADATA Normal file

@ -0,0 +1,3 @@
monorail {
component: "Internals>Core"
}

51
src/base/OWNERS Normal file

@ -0,0 +1,51 @@
# See //base/README.md to find qualification for being an owner.
set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
altimin@chromium.org
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
# per-file rules:
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*
# For Android-specific changes:
per-file ..._android*=file://base/android/OWNERS
# For Fuchsia-specific changes:
per-file ..._fuchsia*=file://build/fuchsia/OWNERS
# For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS
per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
# Logging-related changes:
per-file check*=olivierli@chromium.org
per-file check*=pbos@chromium.org
per-file dcheck*=olivierli@chromium.org
per-file dcheck*=pbos@chromium.org
per-file logging*=olivierli@chromium.org
per-file logging*=pbos@chromium.org
per-file notreached.h=olivierli@chromium.org
per-file notreached.h=pbos@chromium.org
# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS
per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS

163
src/base/PRESUBMIT.py Normal file

@ -0,0 +1,163 @@
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/base.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
def CheckChangeLintsClean(input_api, output_api):
"""Makes sure that the code is cpplint clean."""
# lint_filters=[] stops the OFF_BY_DEFAULT_LINT_FILTERS from being disabled,
# finding many more issues. verbose_level=1 finds a small number of additional
# issues.
# The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
# Only process those extensions which are used in Chromium, in directories
# that currently lint clean.
CLEAN_CPP_FILES_ONLY = (r'base/win/.*\.(cc|h)$', )
source_file_filter = lambda x: input_api.FilterSourceFile(
x,
files_to_check=CLEAN_CPP_FILES_ONLY,
files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
return input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, source_file_filter=source_file_filter,
lint_filters=[], verbose_level=1)
def _CheckNoInterfacesInBase(input_api, output_api):
"""Checks to make sure no files in libbase.a have |@interface|."""
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('base/') and
not "/ios/" in f.LocalPath() and
not "/test/" in f.LocalPath() and
not f.LocalPath().endswith('.java') and
not f.LocalPath().endswith('_unittest.mm') and
not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Objective-C interfaces or categories are forbidden in libbase. ' +
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
'browse_thread/thread/efb28c10435987fd',
files) ]
return []
def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
"""Returns locations matching one of the search_regexes."""
def FilterFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
files_to_check=files_to_check,
files_to_skip=files_to_skip)
no_presubmit = r"// no-presubmit-check"
locations = []
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
for search_regex in search_regexes:
if (input_api.re.search(search_regex, line) and
not input_api.re.search(no_presubmit, line)):
locations.append(" %s:%d" % (f.LocalPath(), line_num))
break
return locations
def _CheckNoTraceEventInclude(input_api, output_api):
"""Verify that //base includes base_tracing.h instead of trace event headers.
Checks that files outside trace event implementation include the
base_tracing.h header instead of specific trace event implementation headers
to maintain compatibility with the gn flag "enable_base_tracing = false".
"""
discouraged_includes = [
r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
r'^#include "third_party/perfetto/include/',
]
files_to_check = [
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*/test/.*",
r".*/trace_event/.*",
r".*/tracing/.*",
]
locations = _FindLocations(input_api, discouraged_includes, files_to_check,
files_to_skip)
if locations:
return [ output_api.PresubmitError(
'Base code should include "base/trace_event/base_tracing.h" instead\n' +
'of trace_event implementation headers. If you need to include an\n' +
'implementation header, verify that "gn check" and base_unittests\n' +
'still pass with gn arg "enable_base_tracing = false" and add\n' +
'"// no-presubmit-check" after the include. \n' +
'\n'.join(locations)) ]
return []
def _WarnPbzeroIncludes(input_api, output_api):
"""Warn to check enable_base_tracing=false when including a pbzero header.
Emits a warning when including a perfetto pbzero header, encouraging the
user to verify that //base still builds with enable_base_tracing=false.
"""
warn_includes = [
r'^#include "third_party/perfetto/protos/',
r'^#include "base/tracing/protos/',
]
files_to_check = [
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*/test/.*",
r".*/trace_event/.*",
r".*/tracing/.*",
]
locations = _FindLocations(input_api, warn_includes, files_to_check,
files_to_skip)
if locations:
return [ output_api.PresubmitPromptWarning(
'Please verify that "gn check" and base_unittests still pass with\n' +
'gn arg "enable_base_tracing = false" when adding typed trace\n' +
'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
'to exclude pbzero headers and anything not supported by\n' +
'//base/trace_event/trace_event_stub.h.\n' +
'\n'.join(locations)) ]
return []
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
results.extend(_CheckNoTraceEventInclude(input_api, output_api))
results.extend(_WarnPbzeroIncludes(input_api, output_api))
results.extend(CheckChangeLintsClean(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results

87
src/base/README.md Normal file

@ -0,0 +1,87 @@
# What is this
Contains a written down set of principles and other information on //base.
Please add to it!
## About //base:
Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.
The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.
Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.
If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.
base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.
## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex c++ stuff
* inclination to always ask why and understand everything (including external
interactions like win32) rather than just hoping the author did it right
* mentorship/experience
* demonstrated good judgement (esp with regards to public APIs) over a length
of time
Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.
## Design and naming
* Be sure to use the base namespace.
* STL-like constructs should adhere as closely to STL as possible. Functions
and behaviors not present in STL should only be added when they are related
to the specific data structure implemented by the container.
* For STL-like constructs our policy is that they should use STL-like naming
even when it may conflict with the style guide. So functions and class names
should be lower case with underscores. Non-STL-like classes and functions
should use Google naming.
## Performance testing
Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:
* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
queue in isolation.
* ConditionVariablePerfTest: Measures thread switching cost of condition
variables.
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
machinery.
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
* MessageLoopPerfTest: Measures the speed of task posting in various
configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PartitionLockPerfTest: Tests the implementation of Lock used in
PartitionAlloc.
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
pthreads.
* RandUtilPerfTest: Measures the time it takes to generate random numbers.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* ThreadLocalStoragePerfTest: Exercises different mechanisms for accessing
data associated with the current thread (C++ `thread_local`, the
implementation in //base, the POSIX/WinAPI directly).
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
multithreaded scenarios.
Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes, or 3) changes in //base code
itself.

13
src/base/SECURITY_OWNERS Normal file

@ -0,0 +1,13 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base; OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org

79
src/base/allocator/BUILD.gn Normal file

@ -0,0 +1,79 @@
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
buildflag_header("buildflags") {
header = "buildflags.h"
assert(use_allocator_shim || !use_partition_alloc_as_malloc,
"PartitionAlloc-Everywhere requires the allocator shim")
flags = [
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
]
}
if (is_apple) {
source_set("early_zone_registration_mac") {
sources = [
"early_zone_registration_mac.cc",
"early_zone_registration_mac.h",
]
deps = [
":buildflags",
"//base/allocator/partition_allocator:buildflags",
]
}
}
# Used to shim malloc symbols on Android. See //base/allocator/README.md.
config("wrap_malloc_symbols") {
ldflags = [
"-Wl,-wrap,calloc",
"-Wl,-wrap,free",
"-Wl,-wrap,malloc",
"-Wl,-wrap,memalign",
"-Wl,-wrap,posix_memalign",
"-Wl,-wrap,pvalloc",
"-Wl,-wrap,realloc",
"-Wl,-wrap,valloc",
# Not allocating memory, but part of the API
"-Wl,-wrap,malloc_usable_size",
# <stdlib.h> functions
"-Wl,-wrap,realpath",
# <string.h> functions
"-Wl,-wrap,strdup",
"-Wl,-wrap,strndup",
# <unistd.h> functions
"-Wl,-wrap,getcwd",
# <stdio.h> functions
"-Wl,-wrap,asprintf",
"-Wl,-wrap,vasprintf",
]
}
config("mac_no_default_new_delete_symbols") {
if (!is_component_build) {
# This is already set when we compile libc++, see
# buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as well,
# since the shim defines the symbols, to prevent them being exported.
cflags = [ "-fvisibility-global-new-delete-hidden" ]
}
}

3
src/base/allocator/DIR_METADATA Normal file

@ -0,0 +1,3 @@
monorail {
component: "Internals"
}

7
src/base/allocator/OWNERS Normal file

@ -0,0 +1,7 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS

156
src/base/allocator/README.md Normal file

@ -0,0 +1,156 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.
Bear in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
- Large parts of the renderer (Blink) use two home-brewed allocators,
PartitionAlloc and BlinkGC (Oilpan).
- Some subsystems, such as the V8 JavaScript engine, handle memory management
autonomously.
- Various parts of the codebase use abstractions such as `SharedMemory` or
`DiscardableMemory` which, similarly to the above, have their own page-level
memory management.
Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra-hooks which service calls to malloc/new. The relevant
build-time flags involved are `use_allocator_shim` and
`use_partition_alloc_as_malloc`.
By default, these are true on all platforms except iOS (not yet supported) and
NaCl (no plan to support).
Furthermore, when building with a sanitizer (e.g. `asan`, `msan`, ...) both the
allocator and the shim layer are disabled.
Layering and build deps
-----------------------
The `allocator` target provides the linker flags required for the Windows shim
layer. The `base` target is (almost) the only one depending on `allocator`. No
other targets should depend on it, with the exception of the very few
executables / dynamic libraries that don't depend, either directly or
indirectly, on `base` within the scope of a linker unit.
More importantly, **no other place outside of `/base` should depend on the
specific allocator**.
If such a functional dependency is required that should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`)
**Why does `base` depend on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.
Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) automatically get the correct
set of linker flags to pull in the Windows shim-layer (if needed).
Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.
Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
a central place.
- Full documentation: [Allocator shim design doc][url-allocator-shim].
- Current state: Available and enabled by default on Android, CrOS, Linux,
Mac OS and Windows.
- Tracking bug: [crbug.com/550886](https://crbug.com/550886).
- Build-time flag: `use_allocator_shim`.
**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+ +-----------------------+ +----------------+
| malloc & friends | -> | shim layer | -> | Routing to |
| symbols definition | | implementation | | allocator |
+-------------------------+ +-----------------------+ +----------------+
| - libc symbols (malloc, | | - Security checks | | - glibc |
| calloc, free, ...) | | - Chain of dispatchers| | - Android |
| - C++ symbols (operator | | that can intercept | | bionic |
| new, delete, ...) | | and override | | - WinHeap |
| - glibc weak symbols | | allocations | | - Partition |
| (__libc_malloc, ...) | +-----------------------+ | Alloc |
+-------------------------+ +----------------+
```
**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.
*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols that we
can override in `allocator_shim_override_ucrt_symbols_win.h`.
*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third-party libraries. Symbol resolution on Linux is a
breadth-first search that starts from the root link unit, that is, the
executable (see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats
Specification).
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).
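As a minimal, illustrative sketch (not the actual shim code, which lives in the
`allocator_shim_override_*` headers), any link unit can interpose the
replaceable global allocation functions simply by defining them as exported
strong symbols:
```cpp
// Sketch only: interposing the replaceable global allocation functions, the
// same basic mechanism allocator_shim_override_cpp_symbols.h relies on to
// route operator new / operator delete into the shim.
#include <cstddef>
#include <cstdlib>
#include <new>

void* operator new(std::size_t size) {
  void* ptr = std::malloc(size);
  // The real shim treats allocation failure as fatal ("suicide on
  // malloc-failure") rather than returning nullptr.
  if (!ptr)
    std::abort();
  return ptr;
}

void operator delete(void* ptr) noexcept {
  std::free(ptr);
}
```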
*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case, the approach is instead to wrap the malloc symbols at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
The use of this wrapping flag causes:
- All references to allocator symbols in the Chrome codebase to be rewritten as
references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
route allocator calls inside the shim layer.
- The references to the original `malloc` symbols (which are typically defined
by the system's libc.so) are accessible via the special `__real_malloc` and
friends symbols (which will be relocated, at load time, against `malloc`).
In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
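As an illustrative sketch (not the Chromium code; the real definitions live in
`allocator_shim_override_linker_wrapped_symbols.h`), the wrapping described
above boils down to the following symbol plumbing:
```cpp
// Sketch only: the symbol plumbing that -Wl,-wrap,malloc sets up. Call sites
// in the wrapped link unit are rewritten to reference __wrap_malloc, while
// __real_malloc remains an undefined reference that is relocated against the
// libc malloc at load time.
#include <stddef.h>

extern "C" {

// Resolved against the original libc malloc at load time.
void* __real_malloc(size_t size);

// Every malloc call in the wrapped code ends up here.
void* __wrap_malloc(size_t size) {
  // A real shim routes the call through its dispatcher chain before (or
  // instead of) reaching the system allocator.
  return __real_malloc(size);
}

}  // extern "C"
```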
**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
(using the `InsertAllocatorDispatch` API). They can intercept and override
allocator calls (see the sketch after this list).
- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
This happens inside `allocator_shim.cc`.
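To make the dispatcher chain concrete, here is an illustrative sketch (not the
actual shim code) of an interception function with the `AllocatorDispatch`
signature, modeled on `base/allocator/dispatcher/dispatcher.cc` from this same
import; a real dispatcher provides the full table of function pointers, as
`g_allocator_dispatch` there does.
```cpp
// Sketch only: an interception function matching the AllocatorDispatch
// signature used by the shim, and how such a dispatcher is (un)registered.
#include <cstddef>

#include "base/allocator/partition_allocator/shim/allocator_shim.h"

using allocator_shim::AllocatorDispatch;

void* LoggingAllocFn(const AllocatorDispatch* self, size_t size, void* context) {
  // Forward to the next dispatcher in the chain, then inspect the result.
  void* address = self->next->alloc_function(self->next, size, context);
  // ... bookkeeping on |address| / |size| would go here ...
  return address;
}

// A real dispatcher fills in every member of AllocatorDispatch (alloc, free,
// realloc, ...; see g_allocator_dispatch in dispatcher.cc) and is linked into
// the chain with:
//   allocator_shim::InsertAllocatorDispatch(&my_dispatch);
// and unlinked in tests with:
//   allocator_shim::RemoveAllocatorDispatchForTesting(&my_dispatch);
```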
**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.
Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing

52
src/base/allocator/allocator.gni Normal file

@ -0,0 +1,52 @@
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build_overrides/partition_alloc.gni")
if (is_ios) {
import("//build/config/ios/ios_sdk.gni")
}
declare_args() {
# Causes all the allocations to be routed via allocator_shim.cc.
use_allocator_shim = use_allocator_shim_default
# RAW_PTR_EXCLUSION macro is disabled on official builds because it increased
# binary size. This flag can be used to enable it for official builds too.
force_enable_raw_ptr_exclusion = false
}
assert(
!use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
is_fuchsia || is_apple,
"use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
"and Windows.")
if (is_win && use_allocator_shim) {
# TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
assert(!is_component_build,
"The allocator shim doesn't work for the component build on Windows.")
}
# Chromium-specific asserts. External embedders _may_ elect to use these
# features even without PA-E.
if (!use_partition_alloc_as_malloc) {
# In theory, BackupRefPtr/MTECheckedPtr will work just fine without
# PartitionAlloc-Everywhere, but their scope would be limited to partitions
# that are invoked explicitly (not via malloc). These are only the Blink
# partitions, where we currently don't even use raw_ptr<T>.
assert(!enable_backup_ref_ptr_support,
"Chromium does not use BRP without PA-E")
assert(!enable_mte_checked_ptr_support,
"Chromium does not use MTECheckedPtr without PA-E")
# Pointer compression works only if all pointers are guaranteed to be
# allocated by PA (in one of its core pools, to be precise). In theory,
# this could be useful with partitions that are invoked explicitly. In
# practice, the pointers we have in mind for compression (scoped_refptr<>,
# unique_ptr<>) require PA-E.
assert(!enable_pointer_compression_support,
"Pointer compressions likely doesn't make sense without PA-E")
}

38
src/base/allocator/allocator_check.cc Normal file

@ -0,0 +1,38 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_check.h"
#include "base/allocator/buildflags.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/shim/winheap_stubs_win.h"
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/shim/allocator_interception_mac.h"
#endif
namespace base::allocator {
bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
// Set by allocator_shim_override_ucrt_symbols_win.h when the
// shimmed _set_new_mode() is called.
return allocator_shim::g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
!BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// From allocator_interception_mac.mm.
return allocator_shim::g_replaced_default_zone;
#else
return true;
#endif
}
} // namespace base::allocator

18
src/base/allocator/allocator_check.h Normal file

@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#include "base/base_export.h"
namespace base {
namespace allocator {
BASE_EXPORT bool IsAllocatorInitialized();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

15
src/base/allocator/allocator_extension.cc Normal file

@ -0,0 +1,15 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"
namespace base {
namespace allocator {
void ReleaseFreeMemory() {}
} // namespace allocator
} // namespace base

23
src/base/allocator/allocator_extension.h Normal file

@ -0,0 +1,23 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#include <stddef.h> // for size_t
#include "base/base_export.h"
#include "build/build_config.h"
namespace base {
namespace allocator {
// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

24
src/base/allocator/dispatcher/configuration.h Normal file

@ -0,0 +1,24 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#include <cstddef>
namespace base::allocator::dispatcher::configuration {
// The maximum number of optional observers that may be present depending on
// command line parameters.
constexpr size_t kMaximumNumberOfOptionalObservers = 4;
// The total number of observers including mandatory and optional observers.
// The number of observers primarily affects performance at allocation time.
// The current value of 4 is not backed by hard evidence. Keep in mind that
// even a single observer can severely impact performance.
constexpr size_t kMaximumNumberOfObservers = 4;
} // namespace base::allocator::dispatcher::configuration
#endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_

357
src/base/allocator/dispatcher/dispatcher.cc Normal file

@ -0,0 +1,357 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/shim/allocator_shim.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#if DCHECK_IS_ON()
#include <atomic>
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {
using allocator_shim::AllocatorDispatch;
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
ReentryGuard guard;
void* address = self->next->alloc_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_unchecked_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_zero_initialized_function(self->next, n, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, n * size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_aligned_function(self->next, alignment, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->realloc_function(self->next, address, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
// Note: The RecordFree should be called before free_function
// (here and in other places).
// That is because we need to remove the recorded allocation sample before
// free_function, as once the latter is executed the address becomes available
// and can be allocated by another thread. That would be racy otherwise.
PoissonAllocationSampler::RecordFree(address);
self->next->free_function(self->next, address, context);
}
size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
bool ClaimedAddressFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->claimed_address_function(self->next, address, context);
}
unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
ReentryGuard guard;
unsigned num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
if (LIKELY(guard)) {
for (unsigned i = 0; i < num_allocated; ++i) {
PoissonAllocationSampler::RecordAlloc(
results[i], size, PoissonAllocationSampler::kMalloc, nullptr);
}
}
return num_allocated;
}
void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i)
PoissonAllocationSampler::RecordFree(to_be_freed[i]);
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->free_definite_size_function(self->next, address, size, context);
}
void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->try_free_default_function(self->next, address, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
void* address =
self->next->aligned_malloc_function(self->next, size, alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&ClaimedAddressFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&TryFreeDefaultFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
} // namespace
} // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher::partition_allocator_details {
namespace {
void PartitionAllocHook(void* address, size_t size, const char* type) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kPartitionAlloc, type);
}
void PartitionFreeHook(void* address) {
PoissonAllocationSampler::RecordFree(address);
}
} // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher {
void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::InsertAllocatorDispatch(
&allocator_shim_details::g_allocator_dispatch);
#else
// If the allocator shim isn't available, then we don't install any hooks.
// There's no point in printing an error message, since this can regularly
// happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
}
void RemoveStandardAllocatorHooksForTesting() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::RemoveAllocatorDispatchForTesting(
&allocator_shim_details::g_allocator_dispatch); // IN-TEST
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}
} // namespace base::allocator::dispatcher
namespace base::allocator::dispatcher {
// The private implementation of Dispatcher.
struct Dispatcher::Impl {
void Initialize(const internal::DispatchData& dispatch_data) {
#if DCHECK_IS_ON()
DCHECK(!is_initialized_check_flag_.test_and_set());
#endif
dispatch_data_ = dispatch_data;
ConnectToEmitters(dispatch_data_);
}
void Reset() {
#if DCHECK_IS_ON()
DCHECK([&]() {
auto const was_set = is_initialized_check_flag_.test_and_set();
is_initialized_check_flag_.clear();
return was_set;
}());
#endif
DisconnectFromEmitters(dispatch_data_);
dispatch_data_ = {};
}
private:
// Connect the hooks to the memory subsystem. In some cases, most notably when
// we have no observers at all, the hooks will be invalid and must NOT be
// connected. This way we avoid generating notifications when no observers
// are present.
static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator_shim::InsertAllocatorDispatch(allocator_dispatch);
}
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC)
{
auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
auto* const free_hook = dispatch_data.GetFreeObserverHook();
if (allocation_hook && free_hook) {
partition_alloc::PartitionAllocHooks::SetObserverHooks(allocation_hook,
free_hook);
}
}
#endif
}
static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator_shim::RemoveAllocatorDispatchForTesting(
allocator_dispatch); // IN-TEST
}
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}
// Information on the hooks.
internal::DispatchData dispatch_data_;
#if DCHECK_IS_ON()
// Indicator if the dispatcher has been initialized before.
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
#else
std::atomic_flag is_initialized_check_flag_;
#endif
#endif
};
Dispatcher::Dispatcher() : impl_(std::make_unique<Impl>()) {}
Dispatcher::~Dispatcher() = default;
Dispatcher& Dispatcher::GetInstance() {
static base::NoDestructor<Dispatcher> instance;
return *instance;
}
void Dispatcher::Initialize(const internal::DispatchData& dispatch_data) {
impl_->Initialize(dispatch_data);
}
void Dispatcher::ResetForTesting() {
impl_->Reset();
}
} // namespace base::allocator::dispatcher

78
src/base/allocator/dispatcher/dispatcher.h Normal file

@ -0,0 +1,78 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"
#include <memory>
namespace base::allocator::dispatcher {
void BASE_EXPORT InstallStandardAllocatorHooks();
void BASE_EXPORT RemoveStandardAllocatorHooksForTesting();
namespace internal {
struct DispatchData;
}
// Dispatcher serves as the top level instance for managing the dispatch
// mechanism. The class instance manages connections to the various memory
// subsystems such as PartitionAlloc. To keep the public interface as lean as
// possible it uses a pimpl pattern.
class BASE_EXPORT Dispatcher {
public:
static Dispatcher& GetInstance();
Dispatcher();
// Initialize the dispatch mechanism with the given tuple of observers. The
// observers must be valid (it is only DCHECKed internally at initialization,
// but not verified further).
// If Initialize is called multiple times, the first one wins. All later
// invocations are silently ignored. Initialization is protected from
// concurrent invocations. In case of concurrent accesses, the first one to
// get the lock wins.
// The dispatcher invokes the following functions on the observers:
// void OnAllocation(void* address,
// size_t size,
// AllocationSubsystem sub_system,
// const char* type_name);
// void OnFree(void* address);
//
// Note: The dispatcher mechanism does NOT bring systematic protection against
// recursive invocations. That is, observers which allocate memory on the
// heap, i.e. through dynamically allocated containers or by using the
// CHECK-macro, are responsible for breaking these recursions!
template <typename... ObserverTypes>
void Initialize(const std::tuple<ObserverTypes...>& observers) {
// Get the hooks for running these observers and pass them to further
// initialization
Initialize(internal::GetNotificationHooks(observers));
}
// The following functions provide an interface to setup and tear down the
// dispatcher when testing. This must NOT be used from production code since
// the hooks cannot be removed reliably under all circumstances.
template <typename ObserverType>
void InitializeForTesting(ObserverType* observer) {
Initialize(std::make_tuple(observer));
}
void ResetForTesting();
private:
// structure and pointer to the private implementation.
struct Impl;
std::unique_ptr<Impl> const impl_;
~Dispatcher();
void Initialize(const internal::DispatchData& dispatch_data);
};
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
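
A hypothetical observer wired up against the interface documented in the
header above might look like the following sketch. The header providing
`AllocationSubsystem` is assumed here to be
`base/allocator/dispatcher/subsystem.h`, and the hooks deliberately avoid heap
allocation, as the note in the header requires.
```cpp
// Hypothetical observer sketched against the documented Dispatcher interface.
// Assumption: AllocationSubsystem comes from
// base/allocator/dispatcher/subsystem.h.
#include <atomic>
#include <cstddef>

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/subsystem.h"

class AllocationCounter {
 public:
  // Hooks must not allocate on the heap; a plain atomic counter is safe here.
  void OnAllocation(void* address,
                    size_t size,
                    base::allocator::dispatcher::AllocationSubsystem sub_system,
                    const char* type_name) {
    allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
  }

  void OnFree(void* address) {}

 private:
  std::atomic<size_t> allocated_bytes_{0};
};

// Test-only wiring, per the header above:
//   AllocationCounter counter;
//   base::allocator::dispatcher::Dispatcher::GetInstance()
//       .InitializeForTesting(&counter);
//   ...
//   base::allocator::dispatcher::Dispatcher::GetInstance().ResetForTesting();
```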

206
src/base/allocator/dispatcher/initializer.h Normal file

@ -0,0 +1,206 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher {
namespace internal {
// Filter the passed observers and perform initialization of the passed
// dispatcher.
template <size_t CurrentIndex,
typename DispatcherType,
typename CheckObserverPredicate,
typename VerifiedObservers,
typename UnverifiedObservers,
size_t... IndicesToSelect>
inline void DoInitialize(DispatcherType& dispatcher,
CheckObserverPredicate check_observer,
const VerifiedObservers& verified_observers,
const UnverifiedObservers& unverified_observers,
std::index_sequence<IndicesToSelect...> indices) {
if constexpr (CurrentIndex < std::tuple_size<UnverifiedObservers>::value) {
// We still have some items left to handle.
if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
// The current observer is valid. Hence, append the index of the current
// item to the set of indices and head on to the next item.
DoInitialize<CurrentIndex + 1>(
dispatcher, check_observer, verified_observers, unverified_observers,
std::index_sequence<IndicesToSelect..., CurrentIndex>{});
} else {
// The current observer is not valid. Hence, head on to the next item with
// an unaltered list of indices.
DoInitialize<CurrentIndex + 1>(dispatcher, check_observer,
verified_observers, unverified_observers,
indices);
}
} else if constexpr (CurrentIndex ==
std::tuple_size<UnverifiedObservers>::value) {
// So we have met the end of the tuple of observers to verify.
// Hence, we extract the additional valid observers, append to the tuple of
// already verified observers and hand over to the dispatcher.
auto observers = std::tuple_cat(
verified_observers,
std::make_tuple(std::get<IndicesToSelect>(unverified_observers)...));
// Do a final check that neither the maximum total number of observers nor
// the maximum number of optional observers is exceeded.
static_assert(std::tuple_size<decltype(observers)>::value <=
configuration::kMaximumNumberOfObservers);
static_assert(sizeof...(IndicesToSelect) <=
configuration::kMaximumNumberOfOptionalObservers);
dispatcher.Initialize(std::move(observers));
}
}
} // namespace internal
// The result of concatenating two tuple-types.
template <typename... tuples>
using TupleCat = decltype(std::tuple_cat(std::declval<tuples>()...));
// Initializer collects mandatory and optional observers and initializes the
// passed Dispatcher with only the enabled observers.
//
// In some situations, the presence of observers is only known at runtime, e.g.
// it depends on command line parameters or CPU features. With 3 optional
// observers there are already 8 different combinations. Initializer takes over
// the job of dealing with all combinations from the user. It allows users to
// pass all observers (including
// nullptr for disabled optional observers) and initializes the Dispatcher with
// only the enabled observers.
//
// Since this process results in a combinatoric explosion, Initializer
// distinguishes between optional and mandatory observers. Mandatory observers
// are not included in the filtering process and must always be enabled (not
// nullptr).
//
// To allow the Initializer to track the number and exact type of observers, it
// is implemented as a templated class which holds information on the types in
// the std::tuples passed as template parameters. Therefore, whenever an
// observer is set or added, the Initializer changes its type to reflect this.
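//
// Example usage (illustrative sketch; MyMandatoryObserver, MyOptionalObserver,
// MaybeCreateOptionalObserver() and GetDispatcherInstance() are placeholder
// names, not part of this header):
//
//   MyMandatoryObserver mandatory_observer;
//   MyOptionalObserver* const optional_observer = MaybeCreateOptionalObserver();
//
//   CreateInitializer()
//       .SetMandatoryObservers(&mandatory_observer)
//       .SetOptionalObservers(optional_observer)  // nullptr disables it.
//       .DoInitialize(GetDispatcherInstance());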
template <typename MandatoryObservers = std::tuple<>,
typename OptionalObservers = std::tuple<>>
struct BASE_EXPORT Initializer {
Initializer() = default;
Initializer(MandatoryObservers mandatory_observers,
OptionalObservers optional_observers)
: mandatory_observers_(std::move(mandatory_observers)),
optional_observers_(std::move(optional_observers)) {}
// Set the mandatory observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfObservers.
template <typename... NewMandatoryObservers,
std::enable_if_t<
internal::LessEqual((sizeof...(NewMandatoryObservers) +
std::tuple_size<OptionalObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
SetMandatoryObservers(NewMandatoryObservers*... mandatory_observers) const {
return {std::make_tuple(mandatory_observers...), GetOptionalObservers()};
}
// Add mandatory observers. The number of observers that can be added is
// limited by the current number of observers, see
  // configuration::kMaximumNumberOfObservers.
template <typename... AdditionalMandatoryObservers,
std::enable_if_t<internal::LessEqual(
std::tuple_size<MandatoryObservers>::value +
sizeof...(AdditionalMandatoryObservers) +
std::tuple_size<OptionalObservers>::value,
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<TupleCat<MandatoryObservers,
std::tuple<AdditionalMandatoryObservers*...>>,
OptionalObservers>
AddMandatoryObservers(
AdditionalMandatoryObservers*... additional_mandatory_observers) const {
return {std::tuple_cat(GetMandatoryObservers(),
std::make_tuple(additional_mandatory_observers...)),
GetOptionalObservers()};
}
// Set the optional observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
template <
typename... NewOptionalObservers,
std::enable_if_t<
internal::LessEqual(
sizeof...(NewOptionalObservers),
configuration::kMaximumNumberOfOptionalObservers) &&
internal::LessEqual((sizeof...(NewOptionalObservers) +
std::tuple_size<MandatoryObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
SetOptionalObservers(NewOptionalObservers*... optional_observers) const {
return {GetMandatoryObservers(), std::make_tuple(optional_observers...)};
}
// Add optional observers. The number of observers that can be added is
// limited by the current number of optional observers,
  // configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
template <
typename... AdditionalOptionalObservers,
std::enable_if_t<
internal::LessEqual(
std::tuple_size<OptionalObservers>::value +
sizeof...(AdditionalOptionalObservers),
configuration::kMaximumNumberOfOptionalObservers) &&
internal::LessEqual((std::tuple_size<OptionalObservers>::value +
sizeof...(AdditionalOptionalObservers) +
std::tuple_size<MandatoryObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<
MandatoryObservers,
TupleCat<OptionalObservers, std::tuple<AdditionalOptionalObservers*...>>>
AddOptionalObservers(
AdditionalOptionalObservers*... additional_optional_observers) const {
return {GetMandatoryObservers(),
std::tuple_cat(GetOptionalObservers(),
std::make_tuple(additional_optional_observers...))};
}
// Perform the actual initialization on the passed dispatcher.
// The dispatcher is passed as a template only to provide better testability.
template <typename DispatcherType>
void DoInitialize(DispatcherType& dispatcher) const {
internal::DoInitialize<0>(dispatcher, internal::IsValidObserver{},
GetMandatoryObservers(), GetOptionalObservers(),
{});
}
const MandatoryObservers& GetMandatoryObservers() const {
return mandatory_observers_;
}
const OptionalObservers& GetOptionalObservers() const {
return optional_observers_;
}
private:
MandatoryObservers mandatory_observers_;
OptionalObservers optional_observers_;
};
// Convenience function for creating an empty Initializer.
inline Initializer<> CreateInitializer() {
return {};
}
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_

View File

@ -0,0 +1,42 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_PARTITION_ALLOC)
DispatchData& DispatchData::SetAllocationObserverHooks(
AllocationObserverHook* allocation_observer_hook,
FreeObserverHook* free_observer_hook) {
allocation_observer_hook_ = allocation_observer_hook;
free_observer_hook_ = free_observer_hook;
return *this;
}
DispatchData::AllocationObserverHook* DispatchData::GetAllocationObserverHook()
const {
return allocation_observer_hook_;
}
DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
return free_observer_hook_;
}
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& DispatchData::SetAllocatorDispatch(
AllocatorDispatch* allocator_dispatch) {
allocator_dispatch_ = allocator_dispatch;
return *this;
}
AllocatorDispatch* DispatchData::GetAllocatorDispatch() const {
return allocator_dispatch_;
}
#endif
} // namespace base::allocator::dispatcher::internal

View File

@ -0,0 +1,59 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/base_export.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/partition_alloc.h"
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/partition_allocator/shim/allocator_shim.h"
#endif
namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif
// A simple utility class to pass all the information required to properly hook
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
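// Which setters are available depends on the active buildflags. A usage sketch
// (illustrative; MyAllocationHook, MyFreeHook and my_dispatch are placeholder
// names):
//
//   DispatchData dispatch_data;
//   dispatch_data.SetAllocationObserverHooks(&MyAllocationHook, &MyFreeHook)
//       .SetAllocatorDispatch(&my_dispatch);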
struct BASE_EXPORT DispatchData {
#if BUILDFLAG(USE_PARTITION_ALLOC)
using AllocationObserverHook =
partition_alloc::PartitionAllocHooks::AllocationObserverHook;
using FreeObserverHook =
partition_alloc::PartitionAllocHooks::FreeObserverHook;
DispatchData& SetAllocationObserverHooks(AllocationObserverHook*,
FreeObserverHook*);
AllocationObserverHook* GetAllocationObserverHook() const;
FreeObserverHook* GetFreeObserverHook() const;
private:
AllocationObserverHook* allocation_observer_hook_ = nullptr;
FreeObserverHook* free_observer_hook_ = nullptr;
public:
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
AllocatorDispatch* GetAllocatorDispatch() const;
private:
AllocatorDispatch* allocator_dispatch_ = nullptr;
#endif
};
} // namespace base::allocator::dispatcher::internal
#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_

View File

@ -0,0 +1,372 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/partition_alloc.h"
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/partition_allocator/shim/allocator_shim.h"
#endif
#include <tuple>
namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif
template <typename CheckObserverPredicate,
typename... ObserverTypes,
size_t... Indices>
void inline PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
CheckObserverPredicate check_observer) {
([](bool b) { DCHECK(b); }(check_observer(std::get<Indices>(observers))),
...);
}
template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformAllocationNotification(
const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
void* address,
size_t size,
AllocationSubsystem subSystem,
const char* type_name) {
((std::get<Indices>(observers)->OnAllocation(address, size, subSystem,
type_name)),
...);
}
template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformFreeNotification(
const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
void* address) {
((std::get<Indices>(observers)->OnFree(address)), ...);
}
// DispatcherImpl provides hooks into the various memory subsystems. These hooks
// are responsible for dispatching any notification to the observers.
// In order to provide as much information as possible on the exact types of
// the observers and to prevent any conditional jumps in the hot allocation
// path, the observers are
// stored in a std::tuple. DispatcherImpl performs a CHECK at initialization
// time to ensure they are valid.
template <typename... ObserverTypes>
struct DispatcherImpl {
using AllObservers = std::index_sequence_for<ObserverTypes...>;
template <std::enable_if_t<
internal::LessEqual(sizeof...(ObserverTypes),
configuration::kMaximumNumberOfObservers),
bool> = true>
static DispatchData GetNotificationHooks(
std::tuple<ObserverTypes*...> observers) {
s_observers = std::move(observers);
PerformObserverCheck(s_observers, AllObservers{}, IsValidObserver{});
return CreateDispatchData();
}
private:
static DispatchData CreateDispatchData() {
return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
&PartitionAllocatorFreeHook)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(&allocator_dispatch_)
#endif
;
}
#if BUILDFLAG(USE_PARTITION_ALLOC)
static void PartitionAllocatorAllocationHook(void* address,
size_t size,
const char* type_name) {
DoNotifyAllocation(address, size, AllocationSubsystem::kPartitionAllocator,
type_name);
}
static void PartitionAllocatorFreeHook(void* address) {
DoNotifyFree(address);
}
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_function(self->next, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* const address =
self->next->alloc_unchecked_function(self->next, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_zero_initialized_function(
self->next, n, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, n * size,
AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
    // Note: ReentryGuard only guards against recursions introduced by malloc
    // and the initialization of thread local storage, which happen in the
    // allocation path only (please see the docs of ReentryGuard for full
    // details). Therefore, DoNotifyFree doesn't need to be guarded. Leaving it
    // unguarded also ensures that the free part of the realloc is properly
    // reported.
DoNotifyFree(address);
void* const reallocated_address =
self->next->realloc_function(self->next, address, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(reallocated_address, size,
AllocationSubsystem::kAllocatorShim);
}
return reallocated_address;
}
static void FreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
    // Note: DoNotifyFree should be called before free_function (here and in
    // other places). That is because observers need to handle the allocation
    // being freed before free_function is called, since once the latter has
    // executed the address becomes available again and can be allocated by
    // another thread. Notifying afterwards would be racy.
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->free_function(self->next, address, context);
}
static size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
static bool ClaimedAddressFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->claimed_address_function(self->next, address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
ReentryGuard guard;
unsigned const num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
if (LIKELY(guard)) {
for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocation(results[i], size,
AllocationSubsystem::kAllocatorShim);
}
}
return num_allocated;
}
static void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFree(to_be_freed[i]);
}
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->free_definite_size_function(self->next, address, size, context);
}
static void TryFreeDefaultFn(const AllocatorDispatch* self,
void* address,
void* context) {
DoNotifyFree(address);
self->next->try_free_default_function(self->next, address, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
    // Note: DoNotifyFree doesn't need to be protected from recursions with
    // ReentryGuard, see ReallocFn for details. Leaving it unguarded also
    // ensures that the free portion of the realloc is properly reported.
DoNotifyFree(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->aligned_free_function(self->next, address, context);
}
static AllocatorDispatch allocator_dispatch_;
#endif
static ALWAYS_INLINE void DoNotifyAllocation(
void* address,
size_t size,
AllocationSubsystem subSystem,
const char* type_name = nullptr) {
PerformAllocationNotification(s_observers, AllObservers{}, address, size,
subSystem, type_name);
}
static ALWAYS_INLINE void DoNotifyFree(void* address) {
PerformFreeNotification(s_observers, AllObservers{}, address);
}
static std::tuple<ObserverTypes*...> s_observers;
};
template <typename... ObserverTypes>
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&ClaimedAddressFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&TryFreeDefaultFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
#endif
// Specialization of DispatcherImpl in case we have no observers to notify. In
// this special case we return a set of null pointers as the Dispatcher must not
// install any hooks at all.
template <>
struct DispatcherImpl<> {
static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(nullptr, nullptr)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(nullptr)
#endif
;
}
};
// A little utility function that helps using DispatcherImpl by providing
// automated type deduction for templates.
template <typename... ObserverTypes>
inline DispatchData GetNotificationHooks(
std::tuple<ObserverTypes*...> observers) {
return DispatcherImpl<ObserverTypes...>::GetNotificationHooks(
std::move(observers));
}
} // namespace base::allocator::dispatcher::internal
#endif // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_

View File

@ -0,0 +1,29 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#include <cstddef>
namespace base::allocator::dispatcher::internal {
constexpr bool LessEqual(size_t lhs, size_t rhs) {
return lhs <= rhs;
}
constexpr bool Equal(size_t lhs, size_t rhs) {
return lhs == rhs;
}
struct IsValidObserver {
template <typename T>
constexpr bool operator()(T const* ptr) const noexcept {
return ptr != nullptr;
}
};
} // namespace base::allocator::dispatcher::internal
#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_

View File

@ -0,0 +1,34 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
pthread_key_t ReentryGuard::entered_key_ = 0;
void ReentryGuard::InitTLSSlot() {
if (entered_key_ == 0) {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
}
DCHECK(entered_key_ != 0);
}
#else
void ReentryGuard::InitTLSSlot() {}
#endif
} // namespace base::allocator::dispatcher

View File

@ -0,0 +1,65 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// The macOS implementation of libmalloc sometimes calls malloc recursively,
// delegating allocations between zones. That causes our hooks to be called
// twice. The scoped guard allows us to detect that.
//
// Besides that, the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local
// variables. We therefore use pthread TLS instead of C++ thread_local there.
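//
// Usage sketch (illustrative; AllocateFromSystem and NotifyObservers are
// placeholder names):
//
//   ReentryGuard guard;
//   void* const address = AllocateFromSystem(size);
//   if (LIKELY(guard)) {
//     NotifyObservers(address, size);
//   }
//   return address;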
struct BASE_EXPORT ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
}
~ReentryGuard() {
if (LIKELY(allowed_))
pthread_setspecific(entered_key_, nullptr);
}
explicit operator bool() const noexcept { return allowed_; }
  // This function must be called very early in process start-up in order to
  // acquire a low TLS slot number, because the glibc TLS implementation
  // requires a malloc call to allocate storage for higher slot numbers
  // (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). Cf. heap_profiling::InitTLSSlot.
static void InitTLSSlot();
private:
static pthread_key_t entered_key_;
const bool allowed_;
};
#else
// Use [[maybe_unused]] because this lightweight stand-in for the more
// heavyweight ReentryGuard above would otherwise trigger "unused code"
// warnings.
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; }
static void InitTLSSlot();
};
#endif
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_

View File

@ -0,0 +1,21 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
namespace base::allocator::dispatcher {
// Identifiers for the memory subsystem handling the allocation. Some observers
// require more detailed information on who is performing the allocation, e.g.
// the SamplingHeapProfiler.
enum class AllocationSubsystem {
// Allocation is handled by PartitionAllocator.
kPartitionAllocator = 1,
// Allocation is handled by AllocatorShims.
kAllocatorShim = 2
};
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_

View File

@ -0,0 +1,27 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#include "testing/gtest/include/gtest/gtest.h"
namespace base::allocator::dispatcher::testing {
// DispatcherTest provides some common initialization which most of the
// unittests of the dispatcher require. DispatcherTest should not be used
// directly. Instead, derive your test fixture from it.
struct DispatcherTest : public ::testing::Test {
  // Perform some commonly required initialization; at the moment this is:
// - Initialize the TLS slot for the ReentryGuard
DispatcherTest();
protected:
// Protected d'tor only to prevent direct usage of this class.
~DispatcherTest() override;
};
} // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_

View File

@ -0,0 +1,32 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#include "base/allocator/dispatcher/subsystem.h"
#include "testing/gmock/include/gmock/gmock.h"
#include <cstddef>
namespace base::allocator::dispatcher::testing {
// ObserverMock is a small mock class based on GoogleMock.
// It conforms to the observer interface expected by the dispatcher. The template
// parameter serves only to create distinct types of observers if required.
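//
// Example usage in a test (illustrative sketch; buffer and the concrete
// matchers and values are placeholders):
//
//   ObserverMock<> observer;
//   EXPECT_CALL(observer, OnAllocation(::testing::_, 16u, ::testing::_,
//                                      ::testing::_));
//   observer.OnAllocation(buffer, 16u,
//                         AllocationSubsystem::kAllocatorShim, nullptr);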
template <typename T = void>
struct ObserverMock {
MOCK_METHOD(void,
OnAllocation,
(void* address,
size_t size,
AllocationSubsystem sub_system,
const char* type_name),
());
MOCK_METHOD(void, OnFree, (void* address), ());
};
} // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_

View File

@ -0,0 +1,50 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#include <array>
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher::testing {
namespace internal {
template <size_t Size, typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType {
using type = typename DefineTupleFromSingleType<Size - 1,
Type,
AppendedTypes...,
Type>::type;
};
template <typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType<0, Type, AppendedTypes...> {
using type = std::tuple<AppendedTypes...>;
};
} // namespace internal
template <size_t Size, typename Type>
struct DefineTupleFromSingleType {
using type = typename internal::DefineTupleFromSingleType<Size, Type>::type;
};
template <typename Type, size_t Size, size_t... Indices>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items,
std::index_sequence<Indices...>) {
return std::make_tuple((&items[Indices])...);
}
template <typename Type, size_t Size>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items) {
return CreateTupleOfPointers(items, std::make_index_sequence<Size>{});
}
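// Example (illustrative):
//
//   std::array<int, 3> items{};
//   auto pointers = CreateTupleOfPointers(items);
//   // decltype(pointers) is std::tuple<int*, int*, int*>.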
} // namespace base::allocator::dispatcher::testing
#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_

View File

@ -0,0 +1,78 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/tls.h"
#if USE_LOCAL_TLS_EMULATION()
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/immediate_crash.h"
#include <sys/mman.h>
namespace base::allocator::dispatcher::internal {
void* MMapAllocator::AllocateMemory(size_t size_in_bytes) {
void* const mmap_res = mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
return (mmap_res != MAP_FAILED) ? mmap_res : nullptr;
}
bool MMapAllocator::FreeMemoryForTesting(void* pointer_to_allocated,
size_t size_in_bytes) {
auto const munmap_res = munmap(pointer_to_allocated, size_in_bytes);
return (munmap_res == 0);
}
bool PThreadTLSSystem::Setup(
OnThreadTerminationFunction thread_termination_function) {
#if DCHECK_IS_ON()
  // Setup must happen outside of the allocation path. Therefore, it is safe
  // to verify with a DCHECK.
DCHECK(!initialized_.exchange(true, std::memory_order_acq_rel));
#endif
auto const key_create_res =
pthread_key_create(&data_access_key_, thread_termination_function);
  // On some platforms creating a new pthread key requires an allocation once a
  // given number of keys has been created, e.g. in glibc this limit is denoted
  // by PTHREAD_KEY_2NDLEVEL_SIZE. However, this value is neither present on all
  // systems nor accessible from here. Hence, we do not perform any checks here.
  // However, we strongly recommend setting up the TLS system as early as
  // possible to avoid exceeding this limit.
return (0 == key_create_res);
}
bool PThreadTLSSystem::TearDownForTesting() {
auto const key_delete_res = pthread_key_delete(data_access_key_);
return (0 == key_delete_res);
}
void* PThreadTLSSystem::GetThreadSpecificData() {
#if DCHECK_IS_ON()
if (!initialized_.load(std::memory_order_acquire)) {
return nullptr;
}
#endif
return pthread_getspecific(data_access_key_);
}
bool PThreadTLSSystem::SetThreadSpecificData(void* data) {
#if DCHECK_IS_ON()
if (!initialized_.load(std::memory_order_acquire)) {
return false;
}
#endif
return (0 == pthread_setspecific(data_access_key_, data));
}
} // namespace base::allocator::dispatcher::internal
#endif // USE_LOCAL_TLS_EMULATION()

View File

@ -0,0 +1,418 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TLS_H_
#include "build/build_config.h"
#if BUILDFLAG(IS_POSIX) // the current allocation mechanism (mmap) and TLS
// support (pthread) are both defined by POSIX
#define USE_LOCAL_TLS_EMULATION() true
#else
#define USE_LOCAL_TLS_EMULATION() false
#endif
#if USE_LOCAL_TLS_EMULATION()
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>
#include <pthread.h>
#if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
#else
#define DISABLE_TSAN_INSTRUMENTATION
#endif
namespace base::allocator::dispatcher {
namespace internal {
// Allocates memory using POSIX's mmap and munmap functionality. The allocator
// implements the allocator interface required by ThreadLocalStorage.
struct BASE_EXPORT MMapAllocator {
// The minimum size of a memory chunk when allocating. Even for chunks with
// fewer bytes, at least AllocationChunkSize bytes are allocated. For mmap, this
// is usually the page size of the system.
// For various OS-CPU combinations, partition_alloc::PartitionPageSize() is not
// constexpr. Hence, we cannot use it here and define the value locally instead.
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR) && \
PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
constexpr static size_t AllocationChunkSize =
partition_alloc::PartitionPageSize();
#elif BUILDFLAG(IS_APPLE)
constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
constexpr static size_t AllocationChunkSize = 16384;
#else
constexpr static size_t AllocationChunkSize = 4096;
#endif
// Allocate size_in_bytes bytes of raw memory. Return nullptr if allocation
// fails.
void* AllocateMemory(size_t size_in_bytes);
// Free the raw memory pointed to by pointer_to_allocated. Returns a boolean
// value indicating if the free was successful.
bool FreeMemoryForTesting(void* pointer_to_allocated, size_t size_in_bytes);
};
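// Usage sketch (illustrative):
//
//   MMapAllocator allocator;
//   void* const memory =
//       allocator.AllocateMemory(MMapAllocator::AllocationChunkSize);
//   CHECK(memory);
//   // ... construct objects in the chunk ...
//   allocator.FreeMemoryForTesting(memory, MMapAllocator::AllocationChunkSize);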
// The allocator used by default for the thread local storage.
using DefaultAllocator = MMapAllocator;
using OnThreadTerminationFunction = void (*)(void*);
// The TLS system used by default for the thread local storage. It stores and
// retrieves thread specific data pointers.
struct BASE_EXPORT PThreadTLSSystem {
// Initialize the TLS system to store a data set for different threads.
// @param thread_termination_function An optional function which will be
// invoked upon termination of a thread.
bool Setup(OnThreadTerminationFunction thread_termination_function);
// Tear down the TLS system. After completing tear down, the thread
// termination function passed to Setup will not be invoked anymore.
bool TearDownForTesting();
// Get the pointer to the data associated to the current thread. Returns
// nullptr if the TLS system is not initialized or no data was set before.
void* GetThreadSpecificData();
// Set the pointer to the data associated to the current thread. Return true
// if stored successfully, false otherwise.
bool SetThreadSpecificData(void* data);
private:
pthread_key_t data_access_key_ = 0;
#if DCHECK_IS_ON()
// From POSIX standard at https://www.open-std.org/jtc1/sc22/open/n4217.pdf:
// The effect of calling pthread_getspecific() or pthread_setspecific() with a
// key value not obtained from pthread_key_create() or after key has been
// deleted with pthread_key_delete() is undefined.
//
// Unfortunately, POSIX doesn't define a special value of pthread_key_t
// indicating an invalid key which would allow us to detect accesses outside
// of initialized state. Hence, to prevent us from drifting into the evil
// realm of undefined behaviour we store whether we're somewhere between Setup
// and Teardown.
std::atomic_bool initialized_{false};
#endif
};
using DefaultTLSSystem = PThreadTLSSystem;
// In some scenarios, most notably when testing, the allocator and TLS system
// passed to |ThreadLocalStorage| are not copyable and have to be wrapped, e.g.
// using std::reference_wrapper. |dereference| is a small helper to retrieve the
// underlying value.
template <typename T>
T& dereference(T& ref) {
return ref;
}
template <typename T>
T& dereference(std::reference_wrapper<T>& ref) {
// std::reference_wrapper requires a valid reference for construction,
// therefore, no need in checking here.
return ref.get();
}
// Store thread local data. The data is organized in chunks, where each chunk
// holds |ItemsPerChunk| items. Each item may be free or used.
//
// When a thread requests data, the chunks are searched for a free data item,
// which is registered for this thread and marked as |used|. Further requests by
// this thread will then always return the same item. When a thread terminates,
// the item will be reset and return to the pool of free items.
//
// Upon construction, the first chunk is created. If a thread requests data and
// there is no free item available, another chunk is created. Upon destruction,
// all memory is freed. Pointers to data items become invalid!
//
// Constructor and destructor are not thread safe.
//
// @tparam PayloadType The item type to be stored.
// @tparam AllocatorType The allocator being used. An allocator must provide
// the following interface:
// void* AllocateMemory(size_t size_in_bytes); // Allocate size_in_bytes bytes
// of raw memory.
// bool FreeMemoryForTesting(void* pointer_to_allocated, size_t size_in_bytes);
// // Free the raw memory pointed to by pointer_to_allocated.
// Any failure in allocation or free must terminate the process.
// @tparam TLSSystemType The TLS system being used. A TLS system must provide
// the following interface:
// bool Setup(OnThreadTerminationFunction thread_termination_function);
// bool TearDownForTesting();
// void* GetThreadSpecificData();
// bool SetThreadSpecificData(void* data);
// @tparam AllocationChunkSize The minimum size of a memory chunk that the
// allocator can handle. We try to size the chunks so that each chunk uses this
// size to the maximum.
// @tparam IsDestructibleForTesting For testing purposes we allow the destructor
// to perform clean up upon destruction. Otherwise, using the destructor will
// result in a compilation failure.
template <typename PayloadType,
typename AllocatorType,
typename TLSSystemType,
size_t AllocationChunkSize,
bool IsDestructibleForTesting>
struct ThreadLocalStorage {
ThreadLocalStorage() : root_(AllocateAndInitializeChunk()) { Initialize(); }
// Create a new instance of |ThreadLocalStorage| using the passed allocator
// and TLS system. This initializes the underlying TLS system and creates the
// first chunk of data.
ThreadLocalStorage(AllocatorType allocator, TLSSystemType tlsSystem)
: allocator_(std::move(allocator)),
tls_system_(std::move(tlsSystem)),
root_(AllocateAndInitializeChunk()) {
Initialize();
}
// Deletes an instance of |ThreadLocalStorage| and delete all the data chunks
// created.
~ThreadLocalStorage() {
if constexpr (IsDestructibleForTesting) {
TearDownForTesting();
} else if constexpr (!IsDestructibleForTesting) {
static_assert(
IsDestructibleForTesting,
"ThreadLocalStorage cannot be destructed outside of test code.");
}
}
// Explicitly prevent all forms of Copy/Move construction/assignment. For an
// exact copy of ThreadLocalStorage we would need to copy the mapping of
// thread to item, which we can't do at the moment. On the other side, our
// atomic members do not support moving out of the box.
ThreadLocalStorage(const ThreadLocalStorage&) = delete;
ThreadLocalStorage(ThreadLocalStorage&& other) = delete;
ThreadLocalStorage& operator=(const ThreadLocalStorage&) = delete;
ThreadLocalStorage& operator=(ThreadLocalStorage&&) = delete;
// Get the data item for the current thread. If no data is registered so far,
// find a free item in the chunks and register it for the current thread.
DISABLE_TSAN_INSTRUMENTATION PayloadType* GetThreadLocalData() {
auto& tls_system = dereference(tls_system_);
auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());
if (UNLIKELY(slot == nullptr)) {
slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));
CHECK(tls_system.SetThreadSpecificData(slot));
// Reset the content to wipe out any previous data.
slot->item = {};
}
return &(slot->item);
}
private:
// Encapsulate the payload item and some administrative data.
struct SingleSlot {
PayloadType item;
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
std::atomic_flag is_used = ATOMIC_FLAG_INIT;
#else
std::atomic_flag is_used;
#endif
};
template <size_t NumberOfItems>
struct ChunkT {
SingleSlot slots[NumberOfItems];
// Pointer to the next chunk.
std::atomic<ChunkT*> next_chunk = nullptr;
    // Helper flag to ensure we create the next chunk only once in a
    // multi-threaded environment.
std::once_flag create_next_chunk_flag;
};
template <size_t LowerNumberOfItems,
size_t UpperNumberOfItems,
size_t NumberOfBytes>
static constexpr size_t CalculateEffectiveNumberOfItemsBinSearch() {
if constexpr (LowerNumberOfItems == UpperNumberOfItems) {
return LowerNumberOfItems;
}
constexpr size_t CurrentNumberOfItems =
(UpperNumberOfItems - LowerNumberOfItems) / 2 + LowerNumberOfItems;
if constexpr (sizeof(ChunkT<CurrentNumberOfItems>) > NumberOfBytes) {
return CalculateEffectiveNumberOfItemsBinSearch<
LowerNumberOfItems, CurrentNumberOfItems, NumberOfBytes>();
}
if constexpr (sizeof(ChunkT<CurrentNumberOfItems + 1>) < NumberOfBytes) {
return CalculateEffectiveNumberOfItemsBinSearch<
CurrentNumberOfItems + 1, UpperNumberOfItems, NumberOfBytes>();
}
return CurrentNumberOfItems;
}
// Calculate the maximum number of items we can store in one chunk without the
// size of the chunk exceeding NumberOfBytes. To avoid things like alignment
  // and packing from interfering with the calculation, instead of computing
  // the correct number of items directly we use the sizeof operator on ChunkT
  // to search for the correct size. Unfortunately, the compiler limits the
  // recursion depth, so we use a binary search instead of a simple linear
  // search.
template <size_t MinimumNumberOfItems, size_t NumberOfBytes>
static constexpr size_t CalculateEffectiveNumberOfItems() {
if constexpr (sizeof(ChunkT<MinimumNumberOfItems>) < NumberOfBytes) {
constexpr size_t LowerNumberOfItems = MinimumNumberOfItems;
constexpr size_t UpperNumberOfItems =
NumberOfBytes / sizeof(PayloadType) + 1;
return CalculateEffectiveNumberOfItemsBinSearch<
LowerNumberOfItems, UpperNumberOfItems, NumberOfBytes>();
}
return MinimumNumberOfItems;
}
public:
// The minimum number of items per chunk. It should be high enough to
  // accommodate most items in the root chunk whilst not wasting too much space
// on unnecessary items.
static constexpr size_t MinimumNumberOfItemsPerChunk = 75;
// The effective number of items per chunk. We use the AllocationChunkSize as
  // a hint to calculate the effective number of items so that we occupy one of
  // these memory chunks to the maximum extent possible.
static constexpr size_t ItemsPerChunk =
CalculateEffectiveNumberOfItems<MinimumNumberOfItemsPerChunk,
AllocationChunkSize>();
private:
using Chunk = ChunkT<ItemsPerChunk>;
static_assert(ItemsPerChunk >= MinimumNumberOfItemsPerChunk);
// Mark an item's slot ready for reuse. This function is used as thread
// termination function in the TLS system. We do not destroy anything at this
// point but simply mark the slot as unused.
static void MarkSlotAsFree(void* data) {
// We always store SingleSlots in the TLS system. Therefore, we cast to
// SingleSlot and reset the is_used flag.
auto* const slot = static_cast<SingleSlot*>(data);
DCHECK(slot && slot->is_used.test_and_set());
slot->is_used.clear(std::memory_order_relaxed);
}
// Perform common initialization during construction of an instance.
void Initialize() {
    // The constructor must be called outside of the allocation path.
    // Therefore, it is safe to verify with a CHECK.
    // By passing MarkSlotAsFree as the thread_termination_function we ensure
    // that the slot/item assigned to a finished thread is returned to the pool
    // of unused items.
CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree));
}
Chunk* AllocateAndInitializeChunk() {
void* const uninitialized_memory =
dereference(allocator_).AllocateMemory(sizeof(Chunk));
CHECK(uninitialized_memory);
return new (uninitialized_memory) Chunk{};
}
void FreeAndDeallocateChunkForTesting(Chunk* chunk_to_erase) {
chunk_to_erase->~Chunk();
CHECK(dereference(allocator_)
.FreeMemoryForTesting(chunk_to_erase, sizeof(Chunk)));
}
// Find a free slot in the passed chunk, reserve it and return it to the
// caller. If no free slot can be found, head on to the next chunk. If the
// next chunk doesn't exist, create it.
SingleSlot* FindAndAllocateFreeSlot(Chunk* const chunk) {
SingleSlot* const slot = std::find_if_not(
std::begin(chunk->slots), std::end(chunk->slots),
[](SingleSlot& candidate_slot) {
return candidate_slot.is_used.test_and_set(std::memory_order_relaxed);
});
// So we found a slot. Happily return it to the caller.
if (slot != std::end(chunk->slots)) {
return slot;
}
// Ok, there are no more free slots in this chunk. First, ensure the next
// chunk is valid and create one if necessary.
std::call_once(chunk->create_next_chunk_flag, [&] {
// From https://eel.is/c++draft/thread.once.callonce#3
//
// Synchronization: For any given once_­flag: all active executions occur
// in a total order; completion of an active execution synchronizes with
// the start of the next one in this total order; and the returning
// execution synchronizes with the return from all passive executions.
//
// Therefore, we do only a relaxed store here, call_once synchronizes with
// other threads.
chunk->next_chunk.store(AllocateAndInitializeChunk(),
std::memory_order_relaxed);
});
return FindAndAllocateFreeSlot(chunk->next_chunk);
}
template <bool IsDestructibleForTestingP = IsDestructibleForTesting>
typename std::enable_if<IsDestructibleForTestingP>::type
TearDownForTesting() {
    // The destructor must be called outside of the allocation path. Therefore,
    // it is safe to verify with a CHECK.
    // All accessing threads must have terminated by now. For additional safety
    // we tear down the TLS system first. This way we ensure that
    // MarkSlotAsFree is no longer called and that no accesses happen from the
    // TLS system's side.
CHECK(dereference(tls_system_).TearDownForTesting());
// Delete all data chunks.
for (auto* chunk = root_.load(); chunk != nullptr;) {
auto* next_chunk = chunk->next_chunk.load();
FreeAndDeallocateChunkForTesting(chunk);
chunk = next_chunk;
}
}
AllocatorType allocator_;
TLSSystemType tls_system_;
std::atomic<Chunk*> const root_;
};
} // namespace internal
// The ThreadLocalStorage visible to the user. This uses the internal default
// allocator and TLS system.
template <typename StorageType,
typename AllocatorType = internal::DefaultAllocator,
typename TLSSystemType = internal::DefaultTLSSystem,
size_t AllocationChunkSize = AllocatorType::AllocationChunkSize,
bool IsDestructibleForTesting = false>
using ThreadLocalStorage =
internal::ThreadLocalStorage<StorageType,
AllocatorType,
TLSSystemType,
AllocationChunkSize,
IsDestructibleForTesting>;
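//
// Example (illustrative sketch; ThreadData is a placeholder payload type):
//
//   struct ThreadData { size_t allocation_count = 0; };
//
//   // The storage must never be destroyed (IsDestructibleForTesting is
//   // false), so it is typically held in a function-local leaky static.
//   static ThreadLocalStorage<ThreadData>* const storage =
//       new ThreadLocalStorage<ThreadData>();
//   ThreadData* const data = storage->GetThreadLocalData();
//   ++data->allocation_count;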
} // namespace base::allocator::dispatcher
#endif // USE_LOCAL_TLS_EMULATION()
#endif // BASE_ALLOCATOR_DISPATCHER_TLS_H_

View File

@ -0,0 +1,262 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/early_zone_registration_mac.h"
#include <mach/mach.h>
#include <malloc/malloc.h>
#include "base/allocator/buildflags.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif
namespace partition_alloc {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}
#else
extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}
namespace {
malloc_zone_t* GetDefaultMallocZone() {
// malloc_default_zone() does not return... the default zone, but the
// initial one. The default one is the first element of the default zone
// array.
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
if (result != KERN_SUCCESS)
abort_report_np("Cannot enumerate malloc() zones");
return reinterpret_cast<malloc_zone_t*>(zones[0]);
}
} // namespace
void EarlyMallocZoneRegistration() {
// Must have static storage duration, as raw pointers are passed to
// libsystem_malloc.
static malloc_zone_t g_delegating_zone;
static malloc_introspection_t g_delegating_zone_introspect;
static malloc_zone_t* g_default_zone;
// Make sure that the default zone is instantiated.
malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
g_default_zone = GetDefaultMallocZone();
// The delegating zone:
// - Forwards all allocations to the existing default zone
// - Does *not* claim to own any memory, meaning that it will always be
// skipped in free() in libsystem_malloc.dylib.
//
// This is a temporary zone, until it gets replaced by PartitionAlloc, inside
// the main library. Since the main library depends on many external
// libraries, we cannot install PartitionAlloc as the default zone without
// concurrency issues.
//
  // Instead, what we do here, while the process is single-threaded, is:
// - Register the delegating zone as the default one.
// - Set the original (libsystem_malloc's) one as the second zone
//
// Later, when PartitionAlloc initializes, we replace the default (delegating)
// zone with ours. The end state is:
// 1. PartitionAlloc zone
// 2. libsystem_malloc zone
// Set up of the delegating zone. Note that it doesn't just forward calls to
// the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct containing allocator metadata. So if we
  // passed the "simple" delegating zone pointer as the first parameter, we
  // would immediately crash inside the system zone functions. Hence we need to
  // replace the zone pointer as well.
//
// Calls fall into 4 categories:
// - Allocation calls: forwarded to the real system zone
// - "Is this pointer yours" calls: always answer no
// - free(): Should never be called, but is in practice, see comments below.
// - Diagnostics and debugging: these are typically called for every
// zone. They are no-ops for us, as we don't want to double-count, or lock
// the data structures of the real zone twice.
// Allocation: Forward to the real zone.
g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
return g_default_zone->malloc(g_default_zone, size);
};
g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
size_t size) {
return g_default_zone->calloc(g_default_zone, num_items, size);
};
g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
return g_default_zone->valloc(g_default_zone, size);
};
g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
return g_default_zone->realloc(g_default_zone, ptr, size);
};
g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
void** results, unsigned num_requested) {
return g_default_zone->batch_malloc(g_default_zone, size, results,
num_requested);
};
g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
size_t size) {
return g_default_zone->memalign(g_default_zone, alignment, size);
};
// Does ptr belong to this zone? Return value is != 0 if so.
g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
return 0;
};
// Free functions.
// The normal path for freeing memory is:
// 1. Try all zones in order, call zone->size(ptr)
// 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
// 3. If no zone matches, crash.
//
// Since this zone always returns 0 in size() (see above), then zone->free()
// should never be called. Unfortunately, this is not the case, as some places
// in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
// crashing, forward the call. It's the caller's responsibility to use the
// same zone for free() as for the allocation (this is in the contract of
// malloc_zone_free()).
//
// However, note that the sequence of calls size() -> free() is not possible
// for this zone, as size() always returns 0.
g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
return g_default_zone->free(g_default_zone, ptr);
};
g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
size_t size) {
return g_default_zone->free_definite_size(g_default_zone, ptr, size);
};
g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
unsigned num_to_be_freed) {
return g_default_zone->batch_free(g_default_zone, to_be_freed,
num_to_be_freed);
};
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
return g_default_zone->try_free_default(g_default_zone, ptr);
};
#endif
// Diagnostics and debugging.
//
// Do nothing to reduce memory footprint, the real
// zone will do it.
g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
size_t goal) -> size_t { return 0; };
// Introspection calls are not all optional, for instance locking and
// unlocking before/after fork() is not optional.
//
// Nothing to enumerate.
g_delegating_zone_introspect.enumerator =
[](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
memory_reader_t reader,
vm_range_recorder_t recorder) -> kern_return_t {
return KERN_SUCCESS;
};
// Need to provide a real implementation, it is used for e.g. array sizing.
g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
size_t size) {
return g_default_zone->introspect->good_size(g_default_zone, size);
};
// Nothing to do.
g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
return true;
};
g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
boolean_t verbose) {};
g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
// Do not forward the lock / unlock calls. Since the default zone is still
// there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork()). Rather, do nothing, since this fake
// zone does not need any locking.
g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
// No stats.
g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
malloc_statistics_t* stats) {};
// We are not locked.
g_delegating_zone_introspect.zone_locked =
[](malloc_zone_t* zone) -> boolean_t { return false; };
// Don't support discharge checking.
g_delegating_zone_introspect.enable_discharge_checking =
[](malloc_zone_t* zone) -> boolean_t { return false; };
g_delegating_zone_introspect.disable_discharge_checking =
[](malloc_zone_t* zone) {};
g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
void* memory) {};
// Could use something lower to support fewer functions, but this is
// consistent with the real zone installed by PartitionAlloc.
g_delegating_zone.version = kZoneVersion;
g_delegating_zone.introspect = &g_delegating_zone_introspect;
// This name is used in PartitionAlloc's initialization to determine whether
// it should replace the delegating zone.
g_delegating_zone.zone_name = kDelegatingZoneName;
// Register puts the new zone at the end, unregister swaps the new zone with
// the last one.
// The zone array is, after these lines, in order:
// 1. |g_default_zone|...|g_delegating_zone|
// 2. |g_delegating_zone|...| (no more default)
// 3. |g_delegating_zone|...|g_default_zone|
malloc_zone_register(&g_delegating_zone);
malloc_zone_unregister(g_default_zone);
malloc_zone_register(g_default_zone);
// Make sure that the purgeable zone is after the default one.
  // This will make g_default_zone take the purgeable zone's spot.
malloc_zone_unregister(purgeable_zone);
// Add back the purgeable zone as the last one.
malloc_zone_register(purgeable_zone);
// Final configuration:
// |g_delegating_zone|...|g_default_zone|purgeable_zone|
// Sanity check.
if (GetDefaultMallocZone() != &g_delegating_zone)
abort_report_np("Failed to install the delegating zone as default.");
}
void AllowDoublePartitionAllocZoneRegistration() {
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
if (result != KERN_SUCCESS)
abort_report_np("Cannot enumerate malloc() zones");
// If PartitionAlloc is one of the zones, *change* its name so that
// registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct; it does not copy the data.
for (unsigned int i = 0; i < zone_count; i++) {
malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
if (zone->zone_name &&
strcmp(zone->zone_name, kPartitionAllocZoneName) == 0) {
zone->zone_name = "RenamedPartitionAlloc";
break;
}
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace partition_alloc

View File

@ -0,0 +1,45 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.
namespace partition_alloc {
static constexpr char kDelegatingZoneName[] =
"DelegatingDefaultZoneForPartitionAlloc";
static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";
// Zone version. Determines which callbacks are set in the various malloc_zone_t
// structs.
#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
(__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
#define PA_TRY_FREE_DEFAULT_IS_AVAILABLE 1
#endif
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
constexpr int kZoneVersion = 13;
#else
constexpr int kZoneVersion = 9;
#endif
// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();
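// A minimal usage sketch (hypothetical embedder code; RunEmbedder() is a
// placeholder): an Apple build that uses PartitionAlloc-as-malloc would call
// this at the very top of main(), before any other thread exists, e.g.:
//
//   int main(int argc, char** argv) {
//     partition_alloc::EarlyMallocZoneRegistration();
//     return RunEmbedder(argc, argv);
//   }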
// Tricks the registration code to believe that PartitionAlloc was not already
// registered. This allows a future library load to register PartitionAlloc's
// zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we still
// end up with a split heap, and more memory usage.
//
// This is a hack for crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_

View File

@ -0,0 +1,205 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "build/build_config.h"
namespace base {
namespace features {
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
kUnretainedDanglingPtrModeOption[] = {
{UnretainedDanglingPtrMode::kCrash, "crash"},
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
UnretainedDanglingPtrMode::kDumpWithoutCrashing,
&kUnretainedDanglingPtrModeOption,
};
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
&kDanglingPtrModeOption,
};
#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
BASE_FEATURE(kPartitionAllocBackupRefPtrControl,
"PartitionAllocBackupRefPtrControl",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
// Not unconditionally enabled on 32 bit Android, since it is a
// more memory-constrained platform.
FEATURE_DISABLED_BY_DEFAULT
#else
FEATURE_ENABLED_BY_DEFAULT
#endif
);
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing",
FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif
);
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = {
{BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
"browser-and-renderer"},
{BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
{BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, "enabled-processes",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
BackupRefPtrEnabledProcesses::kBrowserOnly,
#endif
&kBackupRefPtrEnabledProcessesOptions
};
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way,
"disabled-but-3-way-split"},
{BackupRefPtrMode::kDisabledButAddDummyRefCount,
"disabled-but-add-dummy-ref-count"},
};
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions};
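// For reference, a param of this kind can be set from the command line using
// the same syntax as the PartitionAllocDanglingPtr example in the header,
// e.g. (hypothetical choice of mode):
//   --enable-features=PartitionAllocBackupRefPtr:brp-mode/enabled-without-zapping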
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
false}; // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
// If enabled, switches the bucket distribution to an alternate one. Only one of
// these features may be enabled at a time.
BASE_FEATURE(kPartitionAllocUseAlternateDistribution,
"PartitionAllocUseAlternateDistribution",
FEATURE_DISABLED_BY_DEFAULT);
const base::FeatureParam<AlternateBucketDistributionMode>::Option
kPartitionAllocAlternateDistributionOption[] = {
{AlternateBucketDistributionMode::kDefault, "default"},
{AlternateBucketDistributionMode::kCoarser, "coarser"},
{AlternateBucketDistributionMode::kDenser, "denser"},
};
const base::FeatureParam<AlternateBucketDistributionMode>
kPartitionAllocAlternateBucketDistributionParam{
&kPartitionAllocUseAlternateDistribution, "mode",
AlternateBucketDistributionMode::kDefault,
&kPartitionAllocAlternateDistributionOption};
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan unconditionally frees all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to the heap, also scan the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to sort the active slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
"PartitionAllocSortActiveSlotSpans",
FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(IS_WIN)
// Whether to retry allocations when commit fails.
BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
"PageAllocatorRetryOnCommitFailure",
FEATURE_DISABLED_BY_DEFAULT);
#endif
} // namespace features
} // namespace base

View File

@ -0,0 +1,132 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "build/build_config.h"
namespace base {
namespace features {
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUnretainedDanglingPtr);
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr.
kCrash, // (default)
// Log the signature of every occurrence without crashing. It is used by
// bots.
// Format: "[DanglingSignature]\t<1>\t<2>"
// 1. The function that freed the memory while it was still referenced.
// 2. The function that released the raw_ptr reference.
kLogSignature,
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtrControl);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
enum class BackupRefPtrEnabledProcesses {
// BRP enabled only in the browser process.
kBrowserOnly,
// BRP enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// BRP enabled in all processes, except renderer.
kNonRenderer,
// BRP enabled in all processes.
kAllProcesses,
};
enum class BackupRefPtrMode {
// BRP is disabled across all partitions. Equivalent to the Finch flag being
// disabled.
kDisabled,
// BRP is enabled in the main partition, as well as certain Renderer-only
// partitions (if enabled in Renderer at all).
// This entails splitting the main partition.
kEnabled,
// Same as kEnabled but without zapping quarantined objects.
kEnabledWithoutZapping,
// BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode.
kDisabledButSplitPartitions2Way,
// BRP is disabled, but the main partition *and* aligned partition are split
// out, as if BRP was enabled in the "before allocation" mode.
kDisabledButSplitPartitions3Way,
// BRP is disabled, but add dummy ref count to each allocation. This will
// increase allocation size but not change any of the logic. If an issue
// reproduces in this mode, it means the increase in size is causing it.
kDisabledButAddDummyRefCount,
};
enum class AlternateBucketDistributionMode : uint8_t {
kDefault,
kCoarser,
kDenser,
};
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<AlternateBucketDistributionMode>
kPartitionAllocAlternateBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseAlternateDistribution);
#if BUILDFLAG(IS_WIN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif
} // namespace features
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View File

@ -0,0 +1,603 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_alloc_support.h"
#include <array>
#include <cinttypes>
#include <cstdint>
#include <map>
#include <string>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h"
#include "base/debug/task_trace.h"
#include "base/feature_list.h"
#include "base/immediate_crash.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/task/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "base/trace_event/base_tracing.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#if BUILDFLAG(STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#endif // BUILDFLAG(STARSCAN)
namespace base {
namespace allocator {
namespace {
#if defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
partition_alloc::internal::StatsCollector::ScannerId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::ScannerId::kClear:
return "PCScan.Scanner.Clear";
case partition_alloc::internal::StatsCollector::ScannerId::kScan:
return "PCScan.Scanner.Scan";
case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
return "PCScan.Scanner.Sweep";
case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
return "PCScan.Scanner";
case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
__builtin_unreachable();
}
}
constexpr const char* MutatorIdToTracingString(
partition_alloc::internal::StatsCollector::MutatorId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::MutatorId::kClear:
return "PCScan.Mutator.Clear";
case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
return "PCScan.Mutator.ScanStack";
case partition_alloc::internal::StatsCollector::MutatorId::kScan:
return "PCScan.Mutator.Scan";
case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
return "PCScan.Mutator";
case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
__builtin_unreachable();
}
}
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
public:
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::ScannerId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = ScannerIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::MutatorId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = MutatorIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportSurvivedQuarantineSize(size_t survived_size) override {
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
survived_size);
}
void ReportSurvivedQuarantinePercent(double survived_rate) override {
// Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
// divide back.
// TODO(bikineev): Remove after switching to perfetto.
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
1000 * survived_rate);
}
void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
TimeDelta sample = Microseconds(sample_in_usec);
UmaHistogramTimes(stats_name, sample);
}
private:
static constexpr char kTraceCategory[] = "partition_alloc";
};
#endif // defined(PA_ALLOW_PCSCAN)
} // namespace
#if defined(PA_ALLOW_PCSCAN)
void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter;
static bool registered = false;
DCHECK(!registered);
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // defined(PA_ALLOW_PCSCAN)
namespace {
void RunThreadCachePeriodicPurge() {
// Micros, since periodic purge should typically take at most a few ms.
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
TRACE_EVENT0("memory", "PeriodicPurge");
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
instance.RunPeriodicPurge();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}
void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
{
// Micros, since memory reclaiming should typically take at most a few ms.
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
instance->ReclaimNormal();
}
TimeDelta delay =
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
task_runner->PostDelayedTask(
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}
} // namespace
void StartThreadCachePeriodicPurge() {
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}
void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
// Can be called several times.
static bool is_memory_reclaimer_running = false;
if (is_memory_reclaimer_running)
return;
is_memory_reclaimer_running = true;
// The caller of the API fully controls where the reclaim is run.
// However, there are a few reasons to recommend that the caller runs
// it on the main thread:
// - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
// is more likely in cache when executing on the main thread.
// - Memory reclaim takes the partition lock for each partition. As a
// consequence, while reclaim is running, the main thread is unlikely to be
// able to make progress, as it would be waiting on the lock.
// - Finally, this runs in idle time only, so there should be no visible
// impact.
//
// From local testing, time to reclaim is 100us-1ms, and reclaiming every few
// seconds is useful. Since this is meant to run during idle time only, it is
// a reasonable starting point balancing effectiveness vs. cost. See
// crbug.com/942512 for details and experimental results.
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
TimeDelta delay =
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
task_runner->PostDelayedTask(
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}
std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
std::map<std::string, std::string> trials;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// BackupRefPtr_Effective and PCScan_Effective record whether or not
// BackupRefPtr and/or PCScan are enabled. The experiments aren't independent,
// so having a synthetic Finch trial helps us look only at cases where one
// isn't affected by the other.
// Whether PartitionAllocBackupRefPtr is enabled (as determined by
// FeatureList::IsEnabled).
[[maybe_unused]] bool brp_finch_enabled = false;
// Whether PartitionAllocBackupRefPtr is set up to deviate from the default
// behavior. The default behavior is when either the Finch flag is disabled,
// or it is enabled with brp-mode=disabled (these two options are equivalent).
[[maybe_unused]] bool brp_nondefault_behavior = false;
// Whether PartitionAllocBackupRefPtr is set up to enable BRP protection. It
// requires the Finch flag to be enabled and brp-mode!=disabled*. Some modes,
// e.g. disabled-but-3-way-split, do something (hence can't be considered the
// default behavior), but don't enable BRP protection.
[[maybe_unused]] bool brp_truly_enabled = false;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr))
brp_finch_enabled = true;
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() !=
features::BackupRefPtrMode::kDisabled)
brp_nondefault_behavior = true;
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() ==
features::BackupRefPtrMode::kEnabled)
brp_truly_enabled = true;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
[[maybe_unused]] bool pcscan_enabled =
#if defined(PA_ALLOW_PCSCAN)
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else
false;
#endif
std::string brp_group_name = "Unavailable";
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pcscan_enabled) {
// If PCScan is enabled, just ignore the population.
brp_group_name = "Ignore_PCScanIsOn";
} else if (!brp_finch_enabled) {
// The control group is actually disguised as "enabled", but in fact it's
// disabled using a param. This is to differentiate the population that
// participates in the control group, from the population that isn't in any
// group.
brp_group_name = "Ignore_NoGroup";
} else {
switch (features::kBackupRefPtrModeParam.Get()) {
case features::BackupRefPtrMode::kDisabled:
brp_group_name = "Disabled";
break;
case features::BackupRefPtrMode::kEnabled:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlot";
#else
brp_group_name = "EnabledBeforeAlloc";
#endif
break;
case features::BackupRefPtrMode::kEnabledWithoutZapping:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlotWithoutZapping";
#else
brp_group_name = "EnabledBeforeAllocWithoutZapping";
#endif
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
brp_group_name = "DisabledBut2WaySplit";
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
brp_group_name = "DisabledBut3WaySplit";
break;
case features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
brp_group_name = "DisabledButAddDummyRefCount";
break;
}
if (features::kBackupRefPtrModeParam.Get() !=
features::BackupRefPtrMode::kDisabled) {
std::string process_selector;
switch (features::kBackupRefPtrEnabledProcessesParam.Get()) {
case features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_selector = "BrowserOnly";
break;
case features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_selector = "BrowserAndRenderer";
break;
case features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_selector = "NonRenderer";
break;
case features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_selector = "AllProcesses";
break;
}
brp_group_name += ("_" + process_selector);
}
}
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
trials.emplace("BackupRefPtr_Effective", brp_group_name);
// On 32-bit architectures, PCScan is not supported and permanently disabled.
// Don't lump it into "Disabled", so that membership in "Enabled"/"Disabled" is
// fully controlled by Finch and the two groups thus have identical population
// sizes.
std::string pcscan_group_name = "Unavailable";
std::string pcscan_group_name_fallback = "Unavailable";
#if defined(PA_ALLOW_PCSCAN)
if (brp_truly_enabled) {
// If BRP protection is enabled, just ignore the population. Check
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes
// where BRP protection is actually disabled.
pcscan_group_name = "Ignore_BRPIsOn";
} else {
pcscan_group_name = (pcscan_enabled ? "Enabled" : "Disabled");
}
// In case we are incorrect that PCScan is independent of partition-split
// modes, create a fallback trial that only takes into account the BRP Finch
// settings that preserve the default behavior.
if (brp_nondefault_behavior) {
pcscan_group_name_fallback = "Ignore_BRPIsOn";
} else {
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
}
#endif // defined(PA_ALLOW_PCSCAN)
trials.emplace("PCScan_Effective", pcscan_group_name);
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
trials.emplace("DanglingPointerDetector", "Enabled");
#else
trials.emplace("DanglingPointerDetector", "Disabled");
#endif
return trials;
}
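// For illustration, under one hypothetical configuration (BRP support compiled
// in with PUT_REF_COUNT_IN_PREVIOUS_SLOT, the BRP Finch flag enabled with
// brp-mode=enabled and enabled-processes=browser-only, PCScan off, and
// dangling raw_ptr checks compiled in), the map returned above would be
// roughly:
//   {
//     {"BackupRefPtr_Effective", "EnabledPrevSlot_BrowserOnly"},
//     {"PCScan_Effective", "Ignore_BRPIsOn"},
//     {"PCScan_Effective_Fallback", "Ignore_BRPIsOn"},
//     {"DanglingPointerDetector", "Enabled"},
//   }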
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
namespace {
internal::PartitionLock g_stack_trace_buffer_lock;
struct StackTraceWithID {
debug::StackTrace stack_trace;
uintptr_t id = 0;
};
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
void DanglingRawPtrDetected(uintptr_t id) {
// This is called from inside the allocator. No allocation is allowed.
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
#if DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer)
PA_DCHECK(!entry || entry->id != id);
#endif // DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (!entry) {
entry = {debug::StackTrace(), id};
return;
}
}
// The StackTrace hasn't been recorded, because the buffer isn't large
// enough.
}
// From the StackTraces recorded in |DanglingRawPtrDetected|, extract the one
// whose id matches |id|. Return nullopt if not found.
absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (entry && entry->id == id) {
debug::StackTrace stack_trace = std::move(entry->stack_trace);
entry = absl::nullopt;
return stack_trace;
}
}
return absl::nullopt;
}
// Extract, from the StackTrace output, the signature of the pertinent caller.
// This function is meant to be used only by Chromium developers, to list all
// the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
std::vector<StringPiece> lines = SplitStringPiece(
stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
// We are looking for the callers of the function releasing the raw_ptr and
// freeing memory:
const StringPiece callees[] = {
"internal::BackupRefPtrImpl<>::ReleaseInternal()",
"internal::PartitionFree()",
"base::(anonymous namespace)::FreeFn()",
};
size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) {
for (const auto& callee : callees) {
if (lines[i].find(callee) != StringPiece::npos) {
caller_index = i + 1;
}
}
}
if (caller_index >= lines.size()) {
return "undefined";
}
StringPiece caller = lines[caller_index];
// |caller| has the following format:
//
// #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()
// -- -------------- -----------------------------------------------
// Depth Address Function
size_t address_start = caller.find(' ');
size_t function_start = caller.find(' ', address_start + 1);
if (address_start == caller.npos || function_start == caller.npos) {
return "undefined";
}
return std::string(caller.substr(function_start + 1));
}
void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\t%s",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
} else {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\tmissing-stacktrace",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
}
}
void DanglingRawPtrReleasedCrash(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
static const char dangling_ptr_footer[] =
"\n"
"\n"
"Please check for more information on:\n"
"https://chromium.googlesource.com/chromium/src/+/main/docs/"
"dangling_ptr_guide.md\n";
if (stack_trace_free) {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< "The memory was freed at:\n"
<< *stack_trace_free << "\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
} else {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
}
ImmediateCrash();
}
void ClearDanglingRawPtrBuffer() {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
g_stack_trace_buffer = DanglingRawPtrBuffer();
}
} // namespace
void InstallDanglingRawPtrChecks() {
// Clearing storage is useful for running multiple unit tests without
// restarting the test executable.
ClearDanglingRawPtrBuffer();
if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
return;
}
switch (features::kDanglingPtrModeParam.Get()) {
case features::DanglingPtrMode::kCrash:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
break;
case features::DanglingPtrMode::kLogSignature:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(
DanglingRawPtrReleasedLogSignature);
break;
}
}
// TODO(arthursonzogni): There might exist long-lived dangling raw_ptrs. If
// there is a dangling pointer, we should crash at some point. Consider
// providing an API to periodically check the buffer.
#else // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void InstallDanglingRawPtrChecks() {}
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
PA_NO_CODE_FOLDING();
debug::DumpWithoutCrashing();
}
void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
debug::TaskTrace task_trace;
debug::StackTrace stack_trace;
LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< task_trace << stack_trace;
ImmediateCrash();
}
void InstallUnretainedDanglingRawPtrChecks() {
if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
return;
}
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
switch (features::kUnretainedDanglingPtrModeParam.Get()) {
case features::UnretainedDanglingPtrMode::kCrash:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedCrash);
break;
case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
break;
}
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,43 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#include <map>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/memory/scoped_refptr.h"
#include "base/task/sequenced_task_runner.h"
namespace base {
namespace allocator {
#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();
BASE_EXPORT void StartMemoryReclaimer(
scoped_refptr<SequencedTaskRunner> task_runner);
BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
// Install handlers for when dangling raw_ptr(s) have been detected. This prints
// two StackTraces: one where the memory was freed, and one where the last
// dangling raw_ptr stopped referencing it.
//
// This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
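// A rough usage sketch (hypothetical embedder startup code): once the
// FeatureList is initialized and the main thread has a task runner, a
// browser-like process could wire these up along the lines of:
//
//   base::allocator::InstallDanglingRawPtrChecks();
//   base::allocator::InstallUnretainedDanglingRawPtrChecks();
//   base::allocator::StartThreadCachePeriodicPurge();
//   base::allocator::StartMemoryReclaimer(
//       base::SingleThreadTaskRunner::GetCurrentDefault());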
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_

View File

@ -0,0 +1,10 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
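# With this file at the root of a standalone checkout, the usual GN workflow
# applies; for example (hypothetical output directory name):
#   gn gen out/Default
#   ninja -C out/Default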

View File

@ -0,0 +1,463 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/chromecast_build.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/logging.gni")
# Add partition_alloc.gni and import it for partition_alloc configs.
config("partition_alloc_implementation") {
# See also: `partition_alloc_base/component_export.h`
defines = [ "IS_PARTITION_ALLOC_IMPL" ]
}
config("memory_tagging") {
if (current_cpu == "arm64" && is_clang &&
(is_linux || is_chromeos || is_android || is_fuchsia)) {
# base/ has access to the MTE intrinsics because it needs to use them,
# but they're not backwards compatible. Use base::CPU::has_mte()
# beforehand to confirm or use indirect functions (ifuncs) to select
# an MTE-specific implementation at dynamic link-time.
cflags = [
"-Xclang",
"-target-feature",
"-Xclang",
"+mte",
]
}
}
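# A rough illustration of the runtime gate mentioned above (hypothetical call
# site; the exact base::CPU accessor spelling is an assumption here):
#
#   base::CPU cpu;
#   if (cpu.has_mte()) {
#     // It is safe to execute MTE instructions on this device.
#   }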
if (is_fuchsia) {
config("fuchsia_sync_lib") {
libs = [
"sync", # Used by spinning_mutex.h.
]
}
}
if (enable_pkeys && is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
}
component("partition_alloc") {
sources = [
"address_pool_manager.cc",
"address_pool_manager.h",
"address_pool_manager_bitmap.cc",
"address_pool_manager_bitmap.h",
"address_pool_manager_types.h",
"address_space_randomization.cc",
"address_space_randomization.h",
"address_space_stats.h",
"allocation_guard.cc",
"allocation_guard.h",
"compressed_pointer.cc",
"compressed_pointer.h",
"dangling_raw_ptr_checks.cc",
"dangling_raw_ptr_checks.h",
"freeslot_bitmap.h",
"freeslot_bitmap_constants.h",
"gwp_asan_support.cc",
"gwp_asan_support.h",
"memory_reclaimer.cc",
"memory_reclaimer.h",
"oom.cc",
"oom.h",
"oom_callback.cc",
"oom_callback.h",
"page_allocator.cc",
"page_allocator.h",
"page_allocator_constants.h",
"page_allocator_internal.h",
"partition_address_space.cc",
"partition_address_space.h",
"partition_alloc-inl.h",
"partition_alloc.cc",
"partition_alloc.h",
"partition_alloc_base/atomic_ref_count.h",
"partition_alloc_base/augmentations/compiler_specific.h",
"partition_alloc_base/bit_cast.h",
"partition_alloc_base/bits.h",
"partition_alloc_base/check.cc",
"partition_alloc_base/check.h",
"partition_alloc_base/compiler_specific.h",
"partition_alloc_base/component_export.h",
"partition_alloc_base/cpu.cc",
"partition_alloc_base/cpu.h",
"partition_alloc_base/cxx17_backports.h",
"partition_alloc_base/debug/alias.cc",
"partition_alloc_base/debug/alias.h",
"partition_alloc_base/gtest_prod_util.h",
"partition_alloc_base/immediate_crash.h",
"partition_alloc_base/logging.cc",
"partition_alloc_base/logging.h",
"partition_alloc_base/memory/ref_counted.cc",
"partition_alloc_base/memory/ref_counted.h",
"partition_alloc_base/memory/scoped_policy.h",
"partition_alloc_base/memory/scoped_refptr.h",
"partition_alloc_base/migration_adapter.h",
"partition_alloc_base/no_destructor.h",
"partition_alloc_base/numerics/checked_math.h",
"partition_alloc_base/numerics/checked_math_impl.h",
"partition_alloc_base/numerics/clamped_math.h",
"partition_alloc_base/numerics/clamped_math_impl.h",
"partition_alloc_base/numerics/math_constants.h",
"partition_alloc_base/numerics/ostream_operators.h",
"partition_alloc_base/numerics/ranges.h",
"partition_alloc_base/numerics/safe_conversions.h",
"partition_alloc_base/numerics/safe_conversions_arm_impl.h",
"partition_alloc_base/numerics/safe_conversions_impl.h",
"partition_alloc_base/numerics/safe_math.h",
"partition_alloc_base/numerics/safe_math_arm_impl.h",
"partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
"partition_alloc_base/numerics/safe_math_shared_impl.h",
"partition_alloc_base/posix/eintr_wrapper.h",
"partition_alloc_base/rand_util.cc",
"partition_alloc_base/rand_util.h",
"partition_alloc_base/scoped_clear_last_error.h",
"partition_alloc_base/strings/stringprintf.cc",
"partition_alloc_base/strings/stringprintf.h",
"partition_alloc_base/system/sys_info.h",
"partition_alloc_base/thread_annotations.h",
"partition_alloc_base/threading/platform_thread.cc",
"partition_alloc_base/threading/platform_thread.h",
"partition_alloc_base/threading/platform_thread_ref.h",
"partition_alloc_base/time/time.cc",
"partition_alloc_base/time/time.h",
"partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/win_handle_types.h",
"partition_alloc_base/win/win_handle_types_list.inc",
"partition_alloc_base/win/windows_types.h",
"partition_alloc_check.h",
"partition_alloc_config.h",
"partition_alloc_constants.h",
"partition_alloc_forward.h",
"partition_alloc_hooks.cc",
"partition_alloc_hooks.h",
"partition_alloc_notreached.h",
"partition_bucket.cc",
"partition_bucket.h",
"partition_bucket_lookup.h",
"partition_cookie.h",
"partition_direct_map_extent.h",
"partition_freelist_entry.h",
"partition_lock.h",
"partition_oom.cc",
"partition_oom.h",
"partition_page.cc",
"partition_page.h",
"partition_ref_count.h",
"partition_root.cc",
"partition_root.h",
"partition_stats.cc",
"partition_stats.h",
"partition_tag.h",
"partition_tag_bitmap.h",
"partition_tag_types.h",
"partition_tls.h",
"pkey.cc",
"pkey.h",
"random.cc",
"random.h",
"reservation_offset_table.cc",
"reservation_offset_table.h",
"reverse_bytes.h",
"spinning_mutex.cc",
"spinning_mutex.h",
"tagging.cc",
"tagging.h",
"thread_cache.cc",
"thread_cache.h",
"yield_processor.h",
]
# Add *Scan sources if building inside Chromium. Currently,
# we see no need to add a more dedicated buildflag for this, as
# we don't anticipate Chromium-external usage of *Scan.
if (build_with_chromium) {
sources += [
"starscan/logging.h",
"starscan/metadata_allocator.cc",
"starscan/metadata_allocator.h",
"starscan/pcscan.cc",
"starscan/pcscan.h",
"starscan/pcscan_internal.cc",
"starscan/pcscan_internal.h",
"starscan/pcscan_scheduling.cc",
"starscan/pcscan_scheduling.h",
"starscan/raceful_worklist.h",
"starscan/scan_loop.h",
"starscan/snapshot.cc",
"starscan/snapshot.h",
"starscan/stack/stack.cc",
"starscan/stack/stack.h",
"starscan/starscan_fwd.h",
"starscan/state_bitmap.h",
"starscan/stats_collector.cc",
"starscan/stats_collector.h",
"starscan/stats_reporter.h",
"starscan/write_protector.cc",
"starscan/write_protector.h",
]
}
defines = []
if (is_win) {
sources += [
"page_allocator_internals_win.h",
"partition_alloc_base/rand_util_win.cc",
"partition_alloc_base/scoped_clear_last_error_win.cc",
"partition_alloc_base/threading/platform_thread_win.cc",
"partition_alloc_base/time/time_win.cc",
"partition_tls_win.cc",
]
} else if (is_posix) {
sources += [
"page_allocator_internals_posix.cc",
"page_allocator_internals_posix.h",
"partition_alloc_base/files/file_util.h",
"partition_alloc_base/files/file_util_posix.cc",
"partition_alloc_base/posix/safe_strerror.cc",
"partition_alloc_base/posix/safe_strerror.h",
"partition_alloc_base/rand_util_posix.cc",
"partition_alloc_base/threading/platform_thread_internal_posix.h",
"partition_alloc_base/threading/platform_thread_posix.cc",
"partition_alloc_base/time/time_conversion_posix.cc",
]
if (is_android || is_chromeos_ash) {
sources += [ "partition_alloc_base/time/time_android.cc" ]
}
if (is_apple) {
sources += [ "partition_alloc_base/time/time_mac.mm" ]
} else {
sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
}
} else if (is_fuchsia) {
sources += [
"page_allocator_internals_fuchsia.h",
"partition_alloc_base/fuchsia/fuchsia_logging.cc",
"partition_alloc_base/fuchsia/fuchsia_logging.h",
"partition_alloc_base/posix/safe_strerror.cc",
"partition_alloc_base/posix/safe_strerror.h",
"partition_alloc_base/rand_util_fuchsia.cc",
"partition_alloc_base/threading/platform_thread_internal_posix.h",
"partition_alloc_base/threading/platform_thread_posix.cc",
"partition_alloc_base/time/time_conversion_posix.cc",
"partition_alloc_base/time/time_fuchsia.cc",
]
}
if (is_android) {
# Only the Android build requires native_library, and native_library depends
# on file_path, so file_path is added only when is_android is true.
sources += [
"partition_alloc_base/files/file_path.cc",
"partition_alloc_base/files/file_path.h",
"partition_alloc_base/native_library.cc",
"partition_alloc_base/native_library.h",
"partition_alloc_base/native_library_posix.cc",
]
}
if (is_apple) {
# Apple-specific utilities
sources += [
"partition_alloc_base/mac/foundation_util.h",
"partition_alloc_base/mac/foundation_util.mm",
"partition_alloc_base/mac/scoped_cftyperef.h",
"partition_alloc_base/mac/scoped_typeref.h",
]
if (is_ios) {
sources += [
"partition_alloc_base/ios/ios_util.h",
"partition_alloc_base/ios/ios_util.mm",
"partition_alloc_base/system/sys_info_ios.mm",
]
}
if (is_mac) {
sources += [
"partition_alloc_base/mac/mac_util.h",
"partition_alloc_base/mac/mac_util.mm",
"partition_alloc_base/system/sys_info_mac.mm",
]
}
}
if (build_with_chromium) {
if (current_cpu == "x64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
} else if (current_cpu == "arm") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
} else {
# To support a trampoline for another arch, please refer to v8/src/heap/base.
}
}
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":logging_buildflags",
":partition_alloc_buildflags",
]
configs += [
":partition_alloc_implementation",
":memory_tagging",
]
deps = []
public_configs = []
if (is_android) {
# tagging.cc requires __arm_mte_set_* functions.
deps += [ "//third_party/android_ndk:cpu_features" ]
}
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h, which, for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
frameworks = []
if (is_mac) {
# SecTaskGetCodeSignStatus needs:
frameworks += [ "Security.framework" ]
}
if (is_apple) {
frameworks += [
"CoreFoundation.framework",
"Foundation.framework",
]
}
configs += [ "//build/config/compiler:wexit_time_destructors" ]
# Partition alloc is relatively hot (>1% of cycles for users of CrOS). Use speed-focused
# optimizations for it.
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_speed" ]
}
# We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) {
configs += [ ":no_stack_protector" ]
}
}
buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"
_record_alloc_info = false
# GWP-ASan is tied to BRP's "refcount in previous slot" mode, whose
# enablement is already gated on BRP enablement.
_enable_gwp_asan_support = put_ref_count_in_previous_slot
# TODO(crbug.com/1151236): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# partition alloc. For partition alloc, the flags are defined in
# gen/base/allocator/partition_allocator/partition_alloc_buildflags.h, which
# partition alloc includes. For chrome, they are defined in
# gen/base/allocator/buildflags.h, which chrome includes.
flags = [
"USE_PARTITION_ALLOC=$use_partition_alloc",
"ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
# Not to be used directly - instead use
# defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
"ENABLE_MTE_CHECKED_PTR_SUPPORT=$enable_mte_checked_ptr_support",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$glue_core_pools",
"ENABLE_POINTER_COMPRESSION=$enable_pointer_compression_support",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
# *Scan is currently only used by Chromium.
"STARSCAN=$build_with_chromium",
"ENABLE_PKEYS=$enable_pkeys",
]
}
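# For reference, a flag generated from the list above is consumed from C++ via
# the usual buildflag pattern (as done elsewhere in this tree), e.g.:
#
#   #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#
#   #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#   // BRP-specific code.
#   #endif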
buildflag_header("chromecast_buildflags") {
header = "chromecast_buildflags.h"
flags = [
"PA_IS_CAST_ANDROID=$is_cast_android",
"PA_IS_CASTOS=$is_castos",
]
}
buildflag_header("chromeos_buildflags") {
header = "chromeos_buildflags.h"
flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
}
buildflag_header("logging_buildflags") {
header = "logging_buildflags.h"
flags = [ "PA_ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached" ]
}
buildflag_header("debugging_buildflags") {
header = "debugging_buildflags.h"
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
# but avails it as a buildflag.
_dcheck_is_on = is_debug || dcheck_always_on
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
]
}
group("buildflags") {
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":logging_buildflags",
":partition_alloc_buildflags",
]
}
# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").

View File

@ -0,0 +1,157 @@
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# PartitionAlloc is planned to be extracted into a standalone library, and
# therefore dependencies need to be strictly controlled and minimized.
gclient_gn_args_file = 'partition_allocator/build/config/gclient_args.gni'
# Only these hosts are allowed for dependencies in this DEPS file.
# This is a subset of chromium/src/DEPS's allowed_hosts.
allowed_hosts = [
'chromium.googlesource.com',
]
vars = {
'chromium_git': 'https://chromium.googlesource.com',
}
deps = {
'partition_allocator/build':
Var('chromium_git') + '/chromium/src/build.git',
'partition_allocator/buildtools':
Var('chromium_git') + '/chromium/src/buildtools.git',
'partition_allocator/buildtools/clang_format/script':
Var('chromium_git') +
'/external/github.com/llvm/llvm-project/clang/tools/clang-format.git',
'partition_allocator/buildtools/linux64': {
'packages': [
{
'package': 'gn/gn/linux-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "linux"',
},
'partition_allocator/buildtools/mac': {
'packages': [
{
'package': 'gn/gn/mac-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "mac"',
},
'partition_allocator/buildtools/win': {
'packages': [
{
'package': 'gn/gn/windows-amd64',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "win"',
},
'partition_allocator/buildtools/third_party/libc++/trunk':
Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git',
'partition_allocator/buildtools/third_party/libc++abi/trunk':
Var('chromium_git') +
'/external/github.com/llvm/llvm-project/libcxxabi.git',
'partition_allocator/tools/clang':
Var('chromium_git') + '/chromium/src/tools/clang.git',
}
hooks = [
{
'name': 'sysroot_arm',
'pattern': '.',
'condition': 'checkout_linux and checkout_arm',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm'],
},
{
'name': 'sysroot_arm64',
'pattern': '.',
'condition': 'checkout_linux and checkout_arm64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm64'],
},
{
'name': 'sysroot_x86',
'pattern': '.',
'condition': 'checkout_linux and (checkout_x86 or checkout_x64)',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x86'],
},
{
'name': 'sysroot_mips',
'pattern': '.',
'condition': 'checkout_linux and checkout_mips',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=mips'],
},
{
'name': 'sysroot_mips64',
'pattern': '.',
'condition': 'checkout_linux and checkout_mips64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=mips64el'],
},
{
'name': 'sysroot_x64',
'pattern': '.',
'condition': 'checkout_linux and checkout_x64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x64'],
},
{
# Update the prebuilt clang toolchain.
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
'action': ['python3', 'partition_allocator/tools/clang/scripts/update.py'],
},
]
noparent = True
include_rules = [
"+build/build_config.h",
"+build/buildflag.h",
"+third_party/lss/linux_syscall_support.h",
]
specific_include_rules = {
".*_(perf|unit)test\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",
"+base/timer/lap_timer.h",
"+base/win/windows_version.h",
"+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h",
],
"extended_api\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
],
"gtest_prod_util\.h$": [
"+testing/gtest/include/gtest/gtest_prod.h",
],
}

View File

@ -0,0 +1,6 @@
monorail {
component: "Blink>MemoryAllocator>Partition"
}
# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"

View File

@ -0,0 +1,4 @@
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org
tasak@google.com

View File

@ -0,0 +1,203 @@
# PartitionAlloc Design
This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.
## Quick Links
* [Glossary](./glossary.md): Definitions of terms commonly used in
PartitionAlloc. The present document largely avoids defining terms.
* [Build Config](./build_config.md): Pertinent GN args, buildflags, and
macros.
* [Chrome-External Builds](./external_builds.md): Further considerations
for standalone PartitionAlloc, plus an embedder's guide for some extra
GN args.
## Overview
PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.
### Performance
PartitionAlloc is designed to be extremely fast in its fast paths. The fast
paths of allocation and deallocation require very few (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.
![The central allocator manages slots and spans. It is locked on a
per-partition basis. Separately, the thread cache consumes slots
from the central allocator, allowing it to hand out memory
quickly to individual threads.](./dot/layers.png)
However, even the fast path isn't the fastest, because it requires taking
a per-partition lock. Although we optimized the lock, there was still room for
improvement; to this end, we introduced the thread cache.
The thread cache has been tailored to satisfy a vast majority of requests by
allocating from and releasing memory to the main allocator in batches,
amortizing lock acquisition and further improving locality while not trapping
excess memory.
### Security
Security is one of the important goals of PartitionAlloc.
PartitionAlloc guarantees that different partitions exist in different regions
of the process's address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.
Similarly, one page can contain only objects from the same bucket.
When freed, PartitionAlloc returns the physical memory, but continues to reserve
the region for this very bucket.
The above techniques help avoid type confusion attacks. Note, however, that these
apply only to normal buckets and not to direct-mapped allocations, as doing so
for the latter would waste too much address space.
PartitionAlloc also guarantees that:
* Linear overflows/underflows cannot corrupt into, out of, or between
partitions. There are guard pages at the beginning and the end of each memory
region owned by a partition.
* Linear overflows/underflows cannot corrupt the allocation metadata.
PartitionAlloc records metadata in a dedicated, out-of-line region (not
adjacent to objects), surrounded by guard pages. (Freelist pointers are an
exception.)
* A partial overwrite of a freelist pointer should fault.
* Direct map allocations have guard pages at the beginning and the end.
### Alignment
PartitionAlloc guarantees that returned pointers are aligned on
`partition_alloc::internal::kAlignment` boundary (typically 16B on
64-bit systems, and 8B on 32-bit).
PartitionAlloc also supports higher levels of alignment, which can be requested
via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
`posix_memalign()`). The requested
alignment has to be a power of two. PartitionAlloc reserves the right to round
up the requested size to the nearest power of two, greater than or equal to the
requested alignment. This may be wasteful, but allows taking advantage of
natural PartitionAlloc alignment guarantees. Allocations with an alignment
requirement greater than `partition_alloc::internal::kAlignment` are expected
to be very rare.
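For example, on a POSIX platform a caller can request a larger alignment through `posix_memalign()`; the sketch below is illustrative only (the 64-byte alignment and 100-byte size are arbitrary, and whether the call is actually served by PartitionAlloc depends on whether the allocator shim routes `malloc`-family calls to it in the given build).
```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  void* ptr = nullptr;
  // Request 64-byte alignment (a power of two, and a multiple of
  // sizeof(void*)) for a 100-byte allocation.
  if (posix_memalign(&ptr, 64, 100) != 0) {
    return 1;
  }
  printf("aligned to 64 bytes: %d\n",
         static_cast<int>(reinterpret_cast<uintptr_t>(ptr) % 64 == 0));
  free(ptr);
  return 0;
}
```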
## Architecture
### Layout in Memory
PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
pages. Each super page is split into partition pages.
The first and the last partition page are permanently inaccessible and serve
as guard pages, with the exception of one system page in the middle of the first
partition page that holds metadata (32B struct per partition page).
![A super page is shown full of slot spans. The slot spans are logically
strung together to form buckets. At both extremes of the super page
are guard pages. PartitionAlloc metadata is hidden inside the
guard pages at the "front."](./dot/super-page.png)
* The slot span numbers provide a visual hint of their size (in partition
pages).
* Colors provide a visual hint of the bucket to which the slot span belongs.
* Although only five colors are shown, in reality, a super page holds
tens of slot spans, some of which belong to the same bucket.
* The system page that holds metadata tracks each partition page with one 32B
[`PartitionPage` struct][PartitionPage], which is either
* a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
* a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
diagram).
* Gray fill denotes guard pages (one partition page each at the head and tail
of each super page).
* In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. One, both, or
none of these bitmaps may be present, depending on build
configuration, runtime configuration, and type of allocation.
See [`SuperPagePayloadBegin()`][payload-start] for details.
As allocation requests arrive, there is eventually a need to allocate a new slot
span.
Address space for such a slot span is carved out of the last super page. If
there is not enough space, a new super page is allocated. Because slot spans
vary in size, this may leave some space unused (we never go back to fill
previous super pages), which is acceptable because that memory is merely
reserved, and reserved address space is far less precious than committed
memory. Note also that address space reserved for a slot span is never
released, even if the slot span isn't used for a long time.
All slots in a newly allocated slot span are *free*, i.e. available for
allocation.
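As a rough illustration of the arithmetic (the 16 KiB partition page size below is an assumption typical of 64-bit systems with 4 KiB system pages; the authoritative values live in `partition_alloc_constants.h` and vary by configuration):
```cpp
#include <cstddef>
#include <cstdio>

int main() {
  constexpr size_t kSuperPageSize = 2 * 1024 * 1024;  // 2 MiB, reserved.
  constexpr size_t kPartitionPageSize = 16 * 1024;    // 16 KiB (assumed).
  constexpr size_t kPartitionPagesPerSuperPage =
      kSuperPageSize / kPartitionPageSize;  // 128 with these values.
  // The first and last partition pages act as guard pages, so at most
  // kPartitionPagesPerSuperPage - 2 partition pages can hold slot spans.
  printf("partition pages per super page: %zu (usable: %zu)\n",
         kPartitionPagesPerSuperPage, kPartitionPagesPerSuperPage - 2);
  return 0;
}
```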
### Freelist Pointers
All free slots within a slot span are chained into a singly-linked free-list,
by writing the *next* pointer at the beginning of each slot, and the head of the
list is written in the metadata struct.
However, writing a pointer in each free slot of a newly allocated span would
require committing and faulting in physical pages upfront, which would be
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
Only provisioned slots are chained into the freelist.
Once the provisioned slots in a span are depleted, another page's worth of
slots is provisioned (note that a slot which crosses a page boundary only gets
provisioned together with the slots of the next page). See
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.
Freelist pointers are stored at the beginning of each free slot. As such, they
are the only metadata that is inline, i.e. stored among the
objects. This makes them prone to overruns. On little-endian systems, the
pointers are encoded by reversing byte order, so that partial overruns will very
likely result in destroying the pointer, as opposed to forming a valid pointer
to a nearby location.
Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a
different manner. This helps PartitionAlloc detect corruptions.
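A minimal sketch of this idea, assuming a 64-bit little-endian system; the byte-swap mirrors the encoding described above, while the XOR constant used for the shadow is invented for illustration and is not PartitionAlloc's actual transform.
```cpp
#include <cstdint>
#include <cstdlib>

// Reverse the byte order (GCC/Clang builtin), so the inline "next" pointer of
// a free slot does not look like a valid pointer to nearby memory.
uint64_t EncodeNext(uint64_t next) {
  return __builtin_bswap64(next);
}

struct EncodedFreelistEntry {
  uint64_t encoded_next;
  uint64_t shadow;  // Same value under a different (here: XOR) transform.
};

void Write(EncodedFreelistEntry* slot, uint64_t next) {
  slot->encoded_next = EncodeNext(next);
  slot->shadow = EncodeNext(next) ^ 0xA5A5A5A5A5A5A5A5ULL;
}

uint64_t ReadChecked(const EncodedFreelistEntry* slot) {
  // An overwrite that touches only one of the two fields breaks the
  // relationship between them; treat that as corruption.
  if ((slot->encoded_next ^ 0xA5A5A5A5A5A5A5A5ULL) != slot->shadow) {
    abort();
  }
  return EncodeNext(slot->encoded_next);  // bswap is its own inverse.
}
```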
### Slot Span States
A slot span can be in any of 4 states:
* *Full*. A full span has no free slots.
* *Empty*. An empty span has no allocated slots, only free slots.
* *Active*. An active span is anything in between the above two.
* *Decommitted*. A decommitted span is a special case of an empty span, where
all pages are decommitted from memory.
PartitionAlloc prioritizes getting an available slot from an active span, over
an empty one, in hope that the latter can be soon transitioned into a
decommitted state, thus releasing memory. There is no mechanism, however, to
prioritize selection of a slot span based on the number of already allocated
slots.
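The selection order can be summarized with a small illustrative sketch (the enum and helper below are not PartitionAlloc types):
```cpp
enum class SlotSpanState { kFull, kActive, kEmpty, kDecommitted };

// Lower value = tried first when looking for a span to allocate from.
int AllocationPreference(SlotSpanState state) {
  switch (state) {
    case SlotSpanState::kActive:
      return 0;  // Preferred: keep filling partially used spans.
    case SlotSpanState::kEmpty:
      return 1;  // Used only when no active span has a free slot.
    case SlotSpanState::kDecommitted:
      return 2;  // Last resort: its pages must be committed again first.
    case SlotSpanState::kFull:
      return 3;  // No free slots, so never a candidate.
  }
  return 3;
}
```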
An empty span becomes decommitted either when there are too many empty spans
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in
low memory pressure conditions). An allocation can be satisfied from
a decommitted span if there are no active or empty spans available. The slot
provisioning mechanism kicks back in, committing the pages gradually as needed,
and the span becomes active. (There is currently no other way
to unprovision slots than decommitting the entire span).
As mentioned above, a bucket is a collection of slot spans containing slots of
the same size. In fact, each bucket has 3 linked-lists, chaining active, empty
and decommitted spans (see `PartitionBucket::*_slot_spans_head`).
There is no need for a full span list. The lists are updated lazily. An empty,
decommitted or full span may stay on the active list for some time, until
`PartitionBucket::SetNewActiveSlotSpan()` encounters it.
A decommitted span may stay on the empty list for some time,
until `PartitionBucket<thread_safe>::SlowPathAlloc()` encounters it. However,
the inaccuracy can't happen in the other direction, i.e. an active span can only
be on the active list, and an empty span can only be on the active or empty
list.
[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454

@ -0,0 +1,543 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>
#include "base/allocator/partition_allocator/address_space_stats.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_PKEYS)
#include <sys/mman.h>
#endif
namespace partition_alloc::internal {
AddressPoolManager AddressPoolManager::singleton_;
// static
AddressPoolManager& AddressPoolManager::GetInstance() {
return singleton_;
}
#if defined(PA_HAS_64_BITS_POINTERS)
namespace {
// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
// Callers rely on the pages being zero-initialized when recommitting them.
// |DecommitSystemPages| doesn't guarantee this on all operating systems, in
// particular on macOS, but |DecommitAndZeroSystemPages| does.
DecommitAndZeroSystemPages(address, size);
}
} // namespace
void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
PA_DCHECK(!(ptr & kSuperPageOffsetMask));
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
PA_CHECK(handle > 0 && handle <= std::size(aligned_pools_.pools_));
Pool* pool = GetPool(handle);
PA_CHECK(!pool->IsInitialized());
pool->Initialize(ptr, length);
}
void AddressPoolManager::GetPoolUsedSuperPages(
pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used) {
Pool* pool = GetPool(handle);
if (!pool)
return;
pool->GetUsedSuperPages(used);
}
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
Pool* pool = GetPool(handle);
if (!pool)
return 0;
return pool->GetBaseAddress();
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i)
aligned_pools_.pools_[i].Reset();
}
void AddressPoolManager::Remove(pool_handle handle) {
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
pool->Reset();
}
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
Pool* pool = GetPool(handle);
if (!requested_address)
return pool->FindChunk(length);
const bool is_available = pool->TryReserveChunk(requested_address, length);
if (is_available)
return requested_address;
return pool->FindChunk(length);
}
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length) {
PA_DCHECK(0 < handle && handle <= kNumPools);
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
DecommitPages(address, length);
pool->FreeChunk(address, length);
}
void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(ptr != 0);
PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON)
address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_);
#endif
total_bits_ = length / kSuperPageSize;
PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);
ScopedGuard scoped_lock(lock_);
alloc_bitset_.reset();
bit_hint_ = 0;
}
bool AddressPoolManager::Pool::IsInitialized() {
return address_begin_ != 0;
}
void AddressPoolManager::Pool::Reset() {
address_begin_ = 0;
}
void AddressPoolManager::Pool::GetUsedSuperPages(
std::bitset<kMaxSuperPagesInPool>& used) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(IsInitialized());
used = alloc_bitset_;
}
uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
PA_DCHECK(IsInitialized());
return address_begin_;
}
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
const size_t need_bits = requested_size >> kSuperPageShift;
// Use first-fit policy to find an available chunk from free chunks. Start
// from |bit_hint_|, because we know there are no free chunks before.
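// Illustrative example (hypothetical values): with |alloc_bitset_| =
// 1101 0011 (bit 0 listed first), |bit_hint_| = 2 and |need_bits| = 2, the
// first pass finds bit 3 set and restarts the search at bit 4; the second
// pass finds bits 4 and 5 clear, marks them allocated, and returns
// |address_begin_| + 4 * kSuperPageSize.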
size_t beg_bit = bit_hint_;
size_t curr_bit = bit_hint_;
while (true) {
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
// |total_bits_|, return 0 to signal that no free chunk was found.
size_t end_bit = beg_bit + need_bits;
if (end_bit > total_bits_)
return 0;
bool found = true;
for (; curr_bit < end_bit; ++curr_bit) {
if (alloc_bitset_.test(curr_bit)) {
// The bit was set, so this chunk isn't entirely free. Set |found=false|
// to ensure the outer loop continues. However, continue the inner loop
// to set |beg_bit| just past the last set bit in the investigated
// chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
// next outer loop pass from checking the same bits.
beg_bit = curr_bit + 1;
found = false;
if (bit_hint_ == curr_bit)
++bit_hint_;
}
}
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
// mark as allocated) and return the allocated address.
if (found) {
for (size_t i = beg_bit; i < end_bit; ++i) {
PA_DCHECK(!alloc_bitset_.test(i));
alloc_bitset_.set(i);
}
if (bit_hint_ == beg_bit) {
bit_hint_ = end_bit;
}
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(address + requested_size <= address_end_);
#endif
return address;
}
}
PA_NOTREACHED();
return 0;
}
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
size_t requested_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
const size_t need_bits = requested_size / kSuperPageSize;
const size_t end_bit = begin_bit + need_bits;
// Check that requested address is not too high.
if (end_bit > total_bits_)
return false;
// Check if any bit of the requested region is set already.
for (size_t i = begin_bit; i < end_bit; ++i) {
if (alloc_bitset_.test(i))
return false;
}
// Otherwise, set the bits.
for (size_t i = begin_bit; i < end_bit; ++i) {
alloc_bitset_.set(i);
}
return true;
}
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(address + free_size <= address_end_);
#endif
const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
const size_t end_bit = beg_bit + free_size / kSuperPageSize;
for (size_t i = beg_bit; i < end_bit; ++i) {
PA_DCHECK(alloc_bitset_.test(i));
alloc_bitset_.reset(i);
}
bit_hint_ = std::min(bit_hint_, beg_bit);
}
void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
std::bitset<kMaxSuperPagesInPool> pages;
size_t i;
{
ScopedGuard scoped_lock(lock_);
pages = alloc_bitset_;
i = bit_hint_;
}
stats->usage = pages.count();
size_t largest_run = 0;
size_t current_run = 0;
for (; i < total_bits_; ++i) {
if (!pages[i]) {
current_run += 1;
continue;
} else if (current_run > largest_run) {
largest_run = current_run;
}
current_run = 0;
}
// Fell out of the loop with last bit being zero. Check once more.
if (current_run > largest_run) {
largest_run = current_run;
}
stats->largest_available_reservation = largest_run;
}
void AddressPoolManager::GetPoolStats(const pool_handle handle,
PoolStats* stats) {
Pool* pool = GetPool(handle);
if (!pool->IsInitialized()) {
return;
}
pool->GetStats(stats);
}
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 64-bit pool stats.
GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (IsConfigurablePoolAvailable()) {
GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
}
#if BUILDFLAG(ENABLE_PKEYS)
GetPoolStats(kPkeyPoolHandle, &stats->pkey_pool_stats);
#endif
return true;
}
#else // defined(PA_HAS_64_BITS_POINTERS)
static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
0,
"kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
"kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
"kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
"kGuardOffsetOfBRPPoolBitmap.");
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
size_t start_bit,
size_t bit_length) {
const size_t end_bit = start_bit + bit_length;
PA_DCHECK(start_bit <= bitsize);
PA_DCHECK(end_bit <= bitsize);
for (size_t i = start_bit; i < end_bit; ++i) {
PA_DCHECK(!bitmap.test(i));
bitmap.set(i);
}
}
template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
size_t start_bit,
size_t bit_length) {
const size_t end_bit = start_bit + bit_length;
PA_DCHECK(start_bit <= bitsize);
PA_DCHECK(end_bit <= bitsize);
for (size_t i = start_bit; i < end_bit; ++i) {
PA_DCHECK(bitmap.test(i));
bitmap.reset(i);
}
}
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
uintptr_t address =
AllocPages(requested_address, length, kSuperPageSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
return address;
}
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length) {
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
FreePages(address, length);
}
void AddressPoolManager::MarkUsed(pool_handle handle,
uintptr_t address,
size_t length) {
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (handle == kBRPPoolHandle) {
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
// Make IsManagedByBRPPool() return false when an address inside the
// first or the last PartitionPageSize()-bytes block is given:
//
// ------+---+---------------+---+----
// memory ..... | B | managed by PA | B | ...
// regions ------+---+---------------+---+----
//
// B: PartitionPageSize()-bytes block. This is used internally by the
// allocator and is not available for callers.
//
// This is required to avoid crash caused by the following code:
// {
// // Assume this allocation happens outside of PartitionAlloc.
// raw_ptr<T> ptr = new T[20];
// for (size_t i = 0; i < 20; i ++) { ptr++; }
// // |ptr| may point to an address inside 'B'.
// }
//
// Suppose that |ptr| points to an address inside B after the loop. If
// IsManagedByBRPPoolPool(ptr) were to return true, ~raw_ptr<T>() would
// crash, since the memory is not allocated by PartitionAlloc.
SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{
PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
0);
SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
}
}
void AddressPoolManager::MarkUnused(pool_handle handle,
uintptr_t address,
size_t length) {
// Address regions allocated for normal buckets are never released, so this
// function can only be called for direct map. However, do not DCHECK on
// IsManagedByDirectMap(address), because many tests test this function using
// small allocations.
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (handle == kBRPPoolHandle) {
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
// Make IsManagedByBRPPool() return false when an address inside the
// first or the last PartitionPageSize()-bytes block is given.
// (See MarkUsed comment)
ResetBitmap(
AddressPoolManagerBitmap::brp_pool_bits_,
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{
PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
0);
ResetBitmap(
AddressPoolManagerBitmap::regular_pool_bits_,
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
}
}
void AddressPoolManager::ResetForTesting() {
ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
AddressPoolManagerBitmap::regular_pool_bits_.reset();
AddressPoolManagerBitmap::brp_pool_bits_.reset();
}
namespace {
// Counts super pages in use represented by `bitmap`.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
const size_t bits_per_super_page) {
size_t count = 0;
size_t bit_index = 0;
// Stride over super pages.
for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
// Stride over the bits comprising the super page.
for (bit_index = super_page_index * bits_per_super_page;
bit_index < (super_page_index + 1) * bits_per_super_page &&
bit_index < bitsize;
++bit_index) {
if (bitmap[bit_index]) {
count += 1;
// Move on to the next super page.
break;
}
}
}
return count;
}
} // namespace
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
{
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
} // scoped_lock
// Pool usage is read out from the address pool bitmaps.
// The output stats are sized in super pages, so we interpret
// the bitmaps into super page usage.
static_assert(
kSuperPageSize %
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
0,
"information loss when calculating metrics");
constexpr size_t kRegularPoolBitsPerSuperPage =
kSuperPageSize /
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
// Get 32-bit pool usage.
stats->regular_pool_stats.usage =
CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
0,
"information loss when calculating metrics");
constexpr size_t kBRPPoolBitsPerSuperPage =
kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
stats->brp_pool_stats.usage =
CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);
// Get blocklist size.
for (const auto& blocked :
AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
if (blocked.load(std::memory_order_relaxed))
stats->blocklist_size += 1;
}
// Count failures in finding non-blocklisted addresses.
stats->blocklist_hit_count =
AddressPoolManagerBitmap::blocklist_hit_count_.load(
std::memory_order_relaxed);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return true;
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{};
if (GetStats(&stats)) {
dumper->DumpStats(&stats);
}
}
} // namespace partition_alloc::internal

@ -0,0 +1,178 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#include <bitset>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
namespace partition_alloc {
class AddressSpaceStatsDumper;
struct AddressSpaceStats;
struct PoolStats;
} // namespace partition_alloc
namespace partition_alloc::internal {
// (64bit version)
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a
// contiguous reserved address space. Reserve() takes a pool_handle and returns
// address regions from the specified pool. UnreserveAndDecommit() also takes a
// pool_handle and returns the address region back to the manager.
//
// (32bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the bitmaps
// to judge whether a given address is in a pool that supports BackupRefPtr or
// in a pool that doesn't. All PartitionAlloc allocations must be in either of
// the pools.
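// Illustrative call sequence on 64-bit builds (a sketch based on the
// interface below):
//
//   uintptr_t base = AddressPoolManager::GetInstance().Reserve(
//       kRegularPoolHandle, /*requested_address=*/0, kSuperPageSize);
//   // ... commit pages within [base, base + kSuperPageSize) and use them ...
//   AddressPoolManager::GetInstance().UnreserveAndDecommit(
//       kRegularPoolHandle, base, kSuperPageSize);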
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
public:
static AddressPoolManager& GetInstance();
AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if defined(PA_HAS_64_BITS_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle);
// Populate a |used| bitset of superpages currently in use.
void GetPoolUsedSuperPages(pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used);
// Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif
// Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length);
// Frees address space back to the pool and decommits underlying system pages.
void UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length);
void ResetForTesting();
#if !defined(PA_HAS_64_BITS_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
static bool IsManagedByRegularPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}
static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}
#endif // !defined(PA_HAS_64_BITS_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper);
private:
friend class AddressPoolManagerForTesting;
#if BUILDFLAG(ENABLE_PKEYS)
// If we use a pkey pool, we need to tag its metadata with the pkey. Allow the
// function to get access to the pool pointer.
friend void TagGlobalsWithPkey(int pkey);
#endif
constexpr AddressPoolManager() = default;
~AddressPoolManager() = default;
// Populates `stats` if applicable.
// Returns whether `stats` was populated. (They might not be, e.g.
// if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats);
#if defined(PA_HAS_64_BITS_POINTERS)
class Pool {
public:
constexpr Pool() = default;
~Pool() = default;
Pool(const Pool&) = delete;
Pool& operator=(const Pool&) = delete;
void Initialize(uintptr_t ptr, size_t length);
bool IsInitialized();
void Reset();
uintptr_t FindChunk(size_t size);
void FreeChunk(uintptr_t address, size_t size);
bool TryReserveChunk(uintptr_t address, size_t size);
void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
uintptr_t GetBaseAddress();
void GetStats(PoolStats* stats);
private:
Lock lock_;
// The bitset stores the allocation state of the address pool. 1 bit per
// super-page: 1 = allocated, 0 = free.
std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);
// An index of a bit in the bitset before which we know for sure all bits are
// 1s. This is a best-effort hint in the sense that there still may be lots
// of 1s after this index, but at least we know there is no point in
// starting the search before it.
size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;
size_t total_bits_ = 0;
uintptr_t address_begin_ = 0;
#if BUILDFLAG(PA_DCHECK_IS_ON)
uintptr_t address_end_ = 0;
#endif
};
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
return &aligned_pools_.pools_[handle - 1];
}
// Gets the stats for the pool identified by `handle`, if
// initialized.
void GetPoolStats(pool_handle handle, PoolStats* stats);
// If pkey support is enabled, we need to pkey-tag the pkey pool (which needs
// to be last). For this, we need to add padding in front of the pools so that
// the pkey pool starts on a page boundary.
struct {
char pad_[PA_PKEY_ARRAY_PAD_SZ(Pool, kNumPools)] = {};
Pool pools_[kNumPools];
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {};
} aligned_pools_ PA_PKEY_ALIGN;
#endif // defined(PA_HAS_64_BITS_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_;
};
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc::internal {
namespace {
Lock g_lock;
} // namespace
Lock& AddressPoolManagerBitmap::GetLock() {
return g_lock;
}
std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock())
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
std::array<std::atomic_bool,
AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} // namespace partition_alloc::internal
#endif // !defined(PA_HAS_64_BITS_POINTERS)

@ -0,0 +1,190 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#include <array>
#include <atomic>
#include <bitset>
#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc {
namespace internal {
// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
public:
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
// For BRP pool, we use partition page granularity to eliminate the guard
// pages from the bitmap at the ends:
// - Eliminating the guard page at the beginning is needed so that pointers
// to the end of an allocation that immediately precede a super page in BRP
// pool don't accidentally fall into that pool.
// - Eliminating the guard page at the end is to ensure that the last page
// of the address space isn't in the BRP pool. This allows using sentinels
// like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
// on an invalid address. (Note, 64-bit systems don't have this problem as
// the upper half of the address space always belongs to the OS.)
//
// Note, direct map allocations also belong to this pool. The same logic as
// above applies. It is important to note, however, that the granularity used
// here has to be a minimum of partition page size and direct map allocation
// granularity. Since DirectMapAllocationGranularity() is no smaller than
// PageAllocationGranularity(), we don't need to decrease the bitmap
// granularity any further.
static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
"");
static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
static constexpr size_t kBRPPoolBits =
kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
// Regular pool may include both normal bucket and direct map allocations, so
// the bitmap granularity has to be at least as small as
// DirectMapAllocationGranularity(). No need to eliminate guard pages at the
// ends, as this is a BackupRefPtr-specific concern, hence no need to lower
// the granularity to partition page size.
static constexpr size_t kBitShiftOfRegularPoolBitmap =
DirectMapAllocationGranularityShift();
static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
DirectMapAllocationGranularity();
static_assert(kBytesPer1BitOfRegularPoolBitmap ==
1 << kBitShiftOfRegularPoolBitmap,
"");
static constexpr size_t kRegularPoolBits =
kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
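// Example (illustrative): MarkUsed() for a region starting at |start| with
// size |length| sets the (length >> kBitShiftOfRegularPoolBitmap) bits
// beginning at bit (start >> kBitShiftOfRegularPoolBitmap) of
// |regular_pool_bits_|; IsManagedByRegularPool() below simply tests the
// single bit covering the queried address.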
// Returns false for nullptr.
static bool IsManagedByRegularPool(uintptr_t address) {
static_assert(
std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
regular_pool_bits_.size(),
"The bitmap is too small, will result in unchecked out of bounds "
"accesses.");
// It is safe to read |regular_pool_bits_| without a lock since the caller
// is responsible for guaranteeing that the address is inside a valid
// allocation and the deallocation call won't race with this call.
return PA_TS_UNCHECKED_READ(
regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
}
// Returns false for nullptr.
static bool IsManagedByBRPPool(uintptr_t address) {
static_assert(std::numeric_limits<uintptr_t>::max() >>
kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
"The bitmap is too small, will result in unchecked out of "
"bounds accesses.");
// It is safe to read |brp_pool_bits_| without a lock since the caller
// is responsible for guaranteeing that the address is inside a valid
// allocation and the deallocation call won't race with this call.
return PA_TS_UNCHECKED_READ(
brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
}
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static void BanSuperPageFromBRPPool(uintptr_t address) {
brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
true, std::memory_order_relaxed);
}
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
// The only potentially dangerous scenario, in which this check is used, is
// when the assignment of the first raw_ptr<T> object for an address
// allocated outside the BRP pool is racing with the allocation of a new
// super page at the same address. We assume that if raw_ptr<T> is being
// initialized with a raw pointer, the associated allocation is "alive";
// otherwise, the issue should be fixed by rewriting the raw pointer
// variable as raw_ptr<T>. In the worst case, when such a fix is
// impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of
// the problematic field. If the above assumption holds, the existing
// allocation will prevent us from reserving the super-page region and,
// thus, having the race condition. Since we rely on that external
// synchronization, the relaxed memory ordering should be sufficient.
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
std::memory_order_relaxed);
}
static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
private:
friend class AddressPoolManager;
static Lock& GetLock();
static std::bitset<kRegularPoolBits> regular_pool_bits_
PA_GUARDED_BY(GetLock());
static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
brp_forbidden_super_page_map_;
static std::atomic_size_t blocklist_hit_count_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
};
} // namespace internal
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
// No need to add IsManagedByConfigurablePool, because Configurable Pool
// doesn't exist on 32-bit.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif
;
}
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
uintptr_t address) {
// The Configurable Pool is only available on 64-bit builds.
return false;
}
PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
// The Configurable Pool is only available on 64-bit builds.
return false;
}
} // namespace partition_alloc
#endif // !defined(PA_HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

@ -0,0 +1,14 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
namespace partition_alloc::internal {
using pool_handle = unsigned;
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

@ -0,0 +1,67 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h> // Must be in front of other Windows header files.
#include <versionhelpers.h>
#endif
namespace partition_alloc {
uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if defined(PA_HAS_64_BITS_POINTERS)
random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue());
// The ASLRMask() and ASLROffset() constants will be suitable for the
// OS and build configuration.
#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Windows >= 8.1 has the full 47 bits. Use them where available.
static bool windows_81 = false;
static bool windows_81_initialized = false;
if (!windows_81_initialized) {
windows_81 = IsWindows8Point1OrGreater();
windows_81_initialized = true;
}
if (!windows_81) {
random &= internal::ASLRMaskBefore8_10();
} else {
random &= internal::ASLRMask();
}
random += internal::ASLROffset();
#else
random &= internal::ASLRMask();
random += internal::ASLROffset();
#endif // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else // defined(PA_HAS_64_BITS_POINTERS)
#if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it.
// TODO(palmer): Just dump the randomization when HE-ASLR is present.
static BOOL is_wow64 = -1;
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE;
if (!is_wow64)
return 0;
#endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask();
random += internal::ASLROffset();
#endif // defined(PA_HAS_64_BITS_POINTERS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random;
}
} // namespace partition_alloc

@ -0,0 +1,290 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "build/build_config.h"
namespace partition_alloc {
// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
namespace internal {
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) {
return mask & PageAllocationGranularityBaseMask();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL);
}
// Turn off formatting, because the thicket of nested ifdefs below is
// incomprehensible without indentation. It is also incomprehensible with
// indentation, but the only other option is a combinatorial explosion of
// *_{win,linux,mac,foo}_{32,64}.h files.
//
// clang-format off
#if defined(ARCH_CPU_64_BITS)
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// We shouldn't allocate system pages at all for sanitizer builds. However,
// we do, and if random hint addresses interfere with address ranges
// hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See
// https://crbug.com/539863.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrAddress(0x007fffffffffULL);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x7e8000000000ULL);
}
#elif BUILDFLAG(IS_WIN)
// Windows 8.1 and newer support the full 48-bit address range. Older
// versions of Windows only support 44 bits. Since ASLROffset() is non-zero
// and may cause a carry, use 47- and 43-bit masks. See
// http://www.alex-ionescu.com/?p=246
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(47);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
return AslrMask(43);
}
// Try not to map pages into the range where Windows loads DLLs by default.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return 0x80000000ULL;
}
#elif BUILDFLAG(IS_APPLE)
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
// is destroyed. Using a virtual address space that is too large causes a
// leak of about 1 wired [can never be paged out] page per call to mmap. The
// page is only reclaimed when the process is killed. Confine the hint to a
// 39-bit section of the virtual address space.
//
// This implementation adapted from
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
// is that here we clamp to 39 bits, not 32.
//
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
// Be careful, there is a zone where macOS will not map memory, at least
// on ARM64. From an ARM64 machine running 12.3, the range seems to be
// [0x1000000000, 0x7000000000). Make sure that the range we use is
// outside these bounds. In 12.3, there is a reserved area between
// MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
// which is reserved on ARM64. See these constants in XNU's source code
// for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
return AslrAddress(0x10000000000ULL);
}
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#if defined(ARCH_CPU_X86_64)
// Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#elif defined(ARCH_CPU_ARM64)
#if BUILDFLAG(IS_ANDROID)
// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#elif BUILDFLAG(IS_LINUX)
// Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
// page size and number of levels of translation pages used. We use
// 39-bit as base as all setups should support this, lowered to 38-bit
// as ASLROffset() could cause a carry.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
#else
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(38);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
#endif
#elif defined(ARCH_CPU_PPC64)
#if BUILDFLAG(IS_AIX)
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x400000000000ULL);
}
#elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(42);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
#elif defined(ARCH_CPU_S390X)
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow the kernel a
// chance to fulfill the request.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(40);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#elif defined(ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(29);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
#if BUILDFLAG(IS_SOLARIS)
// For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third
// quarter). Because we do not MAP_FIXED, this will be treated only as a
// hint -- the system will not fail to mmap because something else
// happens to already be mapped at our random address. We deliberately
// set the hint high enough to get well above the system's break (that
// is, the heap); Solaris and illumos will try the hint and if that
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x80000000ULL);
}
#elif BUILDFLAG(IS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x90000000ULL);
}
#else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
#endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
#endif // BUILDFLAG(IS_POSIX)
#elif defined(ARCH_CPU_32_BITS)
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#else
#error Please tell us about your exotic hardware! Sounds interesting.
#endif // defined(ARCH_CPU_32_BITS)
// clang-format on
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_

@ -0,0 +1,55 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
#include <cstddef>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
namespace partition_alloc {
// All members are measured in super pages.
struct PoolStats {
size_t usage = 0;
// On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case.
#if defined(PA_HAS_64_BITS_POINTERS)
size_t largest_available_reservation = 0;
#endif // defined(PA_HAS_64_BITS_POINTERS)
};
struct AddressSpaceStats {
PoolStats regular_pool_stats;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PoolStats brp_pool_stats;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if defined(PA_HAS_64_BITS_POINTERS)
PoolStats configurable_pool_stats;
#else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // defined(PA_HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS)
PoolStats pkey_pool_stats;
#endif
};
// Interface passed to `AddressPoolManager::DumpStats()` to mediate
// for `AddressSpaceDumpProvider`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
public:
virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
};
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_

@ -0,0 +1,41 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#if defined(PA_HAS_ALLOCATION_GUARD)
namespace partition_alloc {
namespace {
thread_local bool g_disallow_allocations;
} // namespace
ScopedDisallowAllocations::ScopedDisallowAllocations() {
if (g_disallow_allocations)
PA_IMMEDIATE_CRASH();
g_disallow_allocations = true;
}
ScopedDisallowAllocations::~ScopedDisallowAllocations() {
g_disallow_allocations = false;
}
ScopedAllowAllocations::ScopedAllowAllocations() {
// Save the previous value, as ScopedAllowAllocations is used in all
// partitions, not just the malloc() one(s).
saved_value_ = g_disallow_allocations;
g_disallow_allocations = false;
}
ScopedAllowAllocations::~ScopedAllowAllocations() {
g_disallow_allocations = saved_value_;
}
} // namespace partition_alloc
#endif // defined(PA_HAS_ALLOCATION_GUARD)

@ -0,0 +1,49 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"
namespace partition_alloc {
#if defined(PA_HAS_ALLOCATION_GUARD)
// Disallow allocations in the scope. Does not nest.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
public:
ScopedDisallowAllocations();
~ScopedDisallowAllocations();
};
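// Illustrative usage sketch: while an instance is live on the current
// thread, constructing another ScopedDisallowAllocations (e.g. from a guarded
// allocation path entered re-entrantly) crashes immediately:
//
//   {
//     ScopedDisallowAllocations no_alloc;
//     ScopedDisallowAllocations nested;  // PA_IMMEDIATE_CRASH() fires here.
//   }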
// Re-allow allocations in the scope, saving and restoring the previous state,
// so it can be nested inside a ScopedDisallowAllocations scope.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
public:
ScopedAllowAllocations();
~ScopedAllowAllocations();
private:
bool saved_value_;
};
#else
struct [[maybe_unused]] ScopedDisallowAllocations{};
struct [[maybe_unused]] ScopedAllowAllocations{};
#endif // defined(PA_HAS_ALLOCATION_GUARD)
} // namespace partition_alloc
namespace base::internal {
using ::partition_alloc::ScopedAllowAllocations;
using ::partition_alloc::ScopedDisallowAllocations;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_


@ -0,0 +1,50 @@
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy the code between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
# Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.
.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end
arm_bti_test_function:
# Mark the start of this function as a valid call target.
bti jc
add x0, x0, #1
arm_bti_test_function_invalid_offset:
# This label simulates calling an incomplete function.
# Jumping here should crash systems which support BTI.
add x0, x0, #2
ret
arm_bti_test_function_end:
nop
// For details see section "6.2 Program Property" in
// "ELF for the Arm 64-bit Architecture (AArch64)"
// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property
.pushsection .note.gnu.property, "a";
.balign 8;
.long 4;
.long 0x10;
.long 0x5;
.asciz "GNU";
.long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4;
.long 1; /* GNU_PROPERTY_AARCH64_BTI */;
.long 0;
.popsection


@ -0,0 +1,31 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#include "build/build_config.h"
#if defined(ARCH_CPU_ARM64)
extern "C" {
/**
* A valid BTI function. Jumping to this function should not cause any problem
* in a BTI-enabled environment.
**/
int64_t arm_bti_test_function(int64_t);
/**
* A function without a proper BTI landing pad. Jumping here should crash the
* program on systems which support BTI.
**/
int64_t arm_bti_test_function_invalid_offset(int64_t);
/**
* A simple function which immediately returns to sender.
**/
void arm_bti_test_function_end(void);
}
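// Expected behavior, inferred from the accompanying assembly (illustrative,
// not part of the original header): arm_bti_test_function(x) executes both
// `add` instructions and returns x + 3. arm_bti_test_function_invalid_offset(x)
// returns x + 2 on hardware that does not enforce BTI, and is expected to
// crash when the page is mapped with PROT_BTI on a BTI-capable core, because
// that entry point has no landing pad.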
#endif // defined(ARCH_CPU_ARM64)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_


@ -0,0 +1,118 @@
# Build Config
PartitionAlloc's behavior and operation can be influenced by many
different settings. Broadly, these are controlled at the top-level by
[GN args][gn-declare-args], which propagate via
[buildflags][buildflag-header] and `#defined` clauses.
*** promo
Most of what you'll want to know exists between
* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
* Everything else ending in `.gn` or `.gni` in
`//base/allocator/partition_allocator/`,
* [`allocator.gni`][allocator-gni],
* [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
* [`//base/BUILD.gn`][base-build-gn].
***
*** aside
While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of
PartitionAlloc's behavior is governed by compound conditions `#defined`
in [`partition_alloc_config.h`][partition-alloc-config].
***
*** promo
PartitionAlloc targets C++17. As the team develops standalone
PartitionAlloc, this may diverge from what the rest of Chrome browser
does, as we will be obligated to support external clients that
may not yet support newer C++ standards.
See [Chrome-External Builds](./external_builds.md) for more.
***
## Select GN Args
### `use_partition_alloc`
Defines whether PartitionAlloc is at all available.
Setting this `false` will entirely remove PartitionAlloc from the
Chromium build. _You probably do not want this._
*** note
Back when PartitionAlloc was the dedicated allocator in Blink, disabling
it was logically identical to wholly disabling it in Chromium. This GN
arg organically grew in scope with the advent of
PartitionAlloc-Everywhere and must be `true` as a prerequisite for
enabling PA-E.
***
### `use_partition_alloc_as_malloc`
Does nothing special when value is `false`. Enables
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is `true`.
*** note
* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
includes Blink where PartitionAlloc originated, setting
`use_partition_alloc_as_malloc = false` does not disable PA usage in Blink,
which invokes PA explicitly (not via malloc).
* `use_partition_alloc_as_malloc = true` must not be confused
with `use_partition_alloc` (see above).
***
## Note: Component Builds
When working on PartitionAlloc, know that `is_debug` defaults to
implying `is_component_build`, which interferes with the allocator
shim. A typical set of GN args should include
```none
is_debug = true
is_component_build = false
```
Conversely, build configurations that have `is_component_build = true`
without explicitly specifying PA-specific args will not build with PA-E
enabled.
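For example, a local configuration that exercises PA-E might combine the args
above with the PartitionAlloc switches (illustrative only; the exact set of
companion args varies by platform and milestone):
```none
is_debug = true
is_component_build = false
use_partition_alloc = true
use_partition_alloc_as_malloc = true
```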
## Notable Macros
There is an ongoing effort
[to break out PartitionAlloc into a standalone library][pa-ee-crbug].
Once PartitionAlloc stands alone from the larger Chrome build apparatus,
the code loses access to some macros. This is not an immediate concern,
but the team needs to decide either
* how to propagate these macros in place, or
* how to remove them, replacing them with PA-specific build config.
A non-exhaustive list of work items:
* `OFFICIAL_BUILD` - influences crash macros and
`PA_THREAD_CACHE_ALLOC_STATS`. These are conceptually distinct enough
to be worth separating into dedicated build controls.
* `IS_PARTITION_ALLOC_IMPL` - must be defined when PartitionAlloc is
built as a shared library. This is required to export symbols.
* `COMPONENT_BUILD` - component builds (as per
`//docs/component_build.md`) must `#define COMPONENT_BUILD`.
Additionally, to build Win32, the invoker must `#define WIN32`.
* `MEMORY_TOOL_REPLACES_ALLOCATOR`
* `*_SANITIZER` - mainly influences unit tests.
*** note
Over time, the above list should evolve into a list of macros / GN args
that influence PartitionAlloc's behavior.
***
[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h
[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview
[pa-ee-crbug]: https://crbug.com/1151236


@ -0,0 +1,9 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file will be used to check out PartitionAlloc and to build it as a
# standalone library. In that case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as a part of Chromium,
# Chromium will provide build_with_chromium=true.
build_with_chromium = false


@ -0,0 +1,19 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build_overrides/build.gni")
# This is the default build configuration when building PartitionAlloc
# as a standalone library.
# If embedders want to use PartitionAlloc, they need to create their own
# //build_overrides/partition_alloc.gni and define their own PartitionAlloc
# configuration.
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false


@ -0,0 +1,28 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/compressed_pointer.h"
#if defined(PA_POINTER_COMPRESSION)
namespace partition_alloc::internal {
// We keep the useful part in |g_base_| as 1s to speed up decompression.
alignas(kPartitionCachelineSize)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) CompressedPointerBaseGlobal::Base
CompressedPointerBaseGlobal::g_base_ = {.base = kUsefulBitsMask};
void CompressedPointerBaseGlobal::SetBase(uintptr_t base) {
PA_DCHECK(!IsSet());
PA_DCHECK((base & kUsefulBitsMask) == 0);
g_base_.base = base | kUsefulBitsMask;
}
void CompressedPointerBaseGlobal::ResetBaseForTesting() {
g_base_.base = kUsefulBitsMask;
}
} // namespace partition_alloc::internal
#endif // defined(PA_POINTER_COMPRESSION)


@ -0,0 +1,666 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
#include <climits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#if defined(PA_POINTER_COMPRESSION)
#if !defined(PA_GLUE_CORE_POOLS)
#error "Pointer compression only works with glued pools"
#endif // !defined(PA_GLUE_CORE_POOLS)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#error "Pointer compression currently supports constant pool size"
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#endif // defined(PA_POINTER_COMPRESSION)
namespace partition_alloc {
namespace internal {
template <typename T1, typename T2>
constexpr bool IsDecayedSame =
std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;
#if defined(PA_POINTER_COMPRESSION)
// Pointer compression works by storing only the 'useful' 32-bit part of the
// pointer. The other half (the base) is stored in a global variable
// (CompressedPointerBaseGlobal::g_base_), which is used on decompression. To
// support fast branchless decompression of nullptr, we use the most significant
// bit in the compressed pointer to leverage sign-extension (for non-nullptr
// pointers, the most significant bit is set, whereas for nullptr it's not).
// Using this bit and supporting heaps larger than 4GB relies on having
// alignment bits in pointers. Assuming that all pointers point to at least
// 8-byte alignment objects, pointer compression can support heaps of size <=
// 16GB.
// ((3 alignment bits) = (1 bit for sign-extension) + (2 bits for 16GB heap)).
//
// Example: heap base: 0x4b0'ffffffff
// - g_base: 0x4b3'ffffffff (lower 34 bits set)
// - normal pointer: 0x4b2'a08b6480
// - compression:
// - shift right by 3: 0x96'54116c90
// - truncate: 0x54116c90
// - mark MSB: 0xd4116c90
// - decompression:
// - sign-extend: 0xffffffff'd4116c90
// - shift left by 3: 0xfffffffe'a08b6480
// - 'and' with g_base: 0x000004b2'a08b6480
//
// - nullptr: 0x00000000'00000000
// - compression:
// - shift right by 3: 0x00000000'00000000
// - truncate: 0x00000000
// - (don't mark MSB for nullptr)
// - decompression:
// - sign-extend: 0x00000000'00000000
// - shift left by 3: 0x00000000'00000000
// - 'and' with g_base: 0x00000000'00000000
//
// Pointer compression relies on having both the regular and the BRP pool (core
// pools) 'glued', so that the same base could be used for both. For simplicity,
// the configurations with dynamically selected pool size are not supported.
// However, they can be at the cost of performing an extra load for
// core-pools-shift-size on both compression and decompression.
class CompressedPointerBaseGlobal final {
public:
static constexpr size_t kUsefulBits =
base::bits::CountTrailingZeroBits(PartitionAddressSpace::CorePoolsSize());
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
static constexpr size_t kBitsToShift =
kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
CompressedPointerBaseGlobal() = delete;
// Attribute const allows the compiler to assume that
// CompressedPointerBaseGlobal::g_base_ doesn't change (e.g. across calls) and
// thereby avoid redundant loads.
PA_ALWAYS_INLINE __attribute__((const)) static uintptr_t Get() {
PA_DCHECK(IsBaseConsistent());
return g_base_.base;
}
PA_ALWAYS_INLINE static bool IsSet() {
PA_DCHECK(IsBaseConsistent());
return (g_base_.base & ~kUsefulBitsMask) != 0;
}
private:
static constexpr uintptr_t kUsefulBitsMask =
PartitionAddressSpace::CorePoolsSize() - 1;
static union alignas(kPartitionCachelineSize)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
uintptr_t base;
char cache_line[kPartitionCachelineSize];
} g_base_ PA_CONSTINIT;
PA_ALWAYS_INLINE static bool IsBaseConsistent() {
return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
}
static void SetBase(uintptr_t base);
static void ResetBaseForTesting();
friend class PartitionAddressSpace;
};
#endif // defined(PA_POINTER_COMPRESSION)
} // namespace internal
#if defined(PA_POINTER_COMPRESSION)
template <typename T>
class PA_TRIVIAL_ABI CompressedPointer final {
public:
using UnderlyingType = uint32_t;
PA_ALWAYS_INLINE constexpr CompressedPointer() = default;
PA_ALWAYS_INLINE explicit CompressedPointer(T* ptr) : value_(Compress(ptr)) {}
PA_ALWAYS_INLINE constexpr explicit CompressedPointer(std::nullptr_t)
: value_(0u) {}
PA_ALWAYS_INLINE constexpr CompressedPointer(const CompressedPointer&) =
default;
PA_ALWAYS_INLINE constexpr CompressedPointer(
CompressedPointer&& other) noexcept = default;
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr CompressedPointer(
const CompressedPointer<U>& other) {
if constexpr (internal::IsDecayedSame<T, U>) {
// When pointers have the same type modulo constness, avoid the
// compress-decompress round.
value_ = other.value_;
} else {
// When the types are different, perform the round, because the pointer
// may need to be adjusted.
// TODO(1376980): Avoid the cycle here.
value_ = Compress(other.get());
}
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr CompressedPointer(
CompressedPointer<U>&& other) noexcept
: CompressedPointer(other) {}
~CompressedPointer() = default;
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
const CompressedPointer&) = default;
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
CompressedPointer&& other) noexcept = default;
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
const CompressedPointer<U>& other) {
CompressedPointer copy(other);
value_ = copy.value_;
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
CompressedPointer<U>&& other) noexcept {
*this = other;
return *this;
}
// Don't perform compression when assigning to nullptr.
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(std::nullptr_t) {
value_ = 0u;
return *this;
}
PA_ALWAYS_INLINE T* get() const { return Decompress(value_); }
PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return value_; }
PA_ALWAYS_INLINE constexpr UnderlyingType GetAsIntegral() const {
return value_;
}
PA_ALWAYS_INLINE constexpr explicit operator bool() const {
return is_nonnull();
}
template <typename U = T,
std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
PA_ALWAYS_INLINE U& operator*() const {
PA_DCHECK(is_nonnull());
return *get();
}
PA_ALWAYS_INLINE T* operator->() const {
PA_DCHECK(is_nonnull());
return get();
}
PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& other) {
std::swap(value_, other.value_);
}
private:
template <typename>
friend class CompressedPointer;
static constexpr size_t kBitsForSignExtension = 1;
static constexpr size_t kOverallBitsToShift =
internal::CompressedPointerBaseGlobal::kBitsToShift +
kBitsForSignExtension;
static PA_ALWAYS_INLINE UnderlyingType Compress(T* ptr) {
static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
0);
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
static constexpr size_t kCorePoolsBaseMask =
~(internal::PartitionAddressSpace::CorePoolsSize() - 1);
PA_DCHECK(!ptr ||
(base & kCorePoolsBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer and truncate.
auto compressed = static_cast<UnderlyingType>(uptr >> kOverallBitsToShift);
// If the pointer is non-null, mark the most-significant-bit to sign-extend
// it on decompression. Assuming compression is a significantly less
// frequent operation, we accept more work here in favor of faster
// decompression.
// TODO(1376980): Avoid this by overreserving the heap.
if (compressed)
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
return compressed;
}
static PA_ALWAYS_INLINE T* Decompress(UnderlyingType ptr) {
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will
// sign-extend it. Then, shift the result left by kOverallBitsToShift. It's
// important to shift the already-unsigned value, as left-shifting a negative
// signed value would result in undefined behavior.
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
<< (kOverallBitsToShift);
return reinterpret_cast<T*>(mask & base);
}
UnderlyingType value_;
};
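// Illustrative round-trip (a sketch, not part of the original header;
// requires pointer compression to be enabled and the core-pools base to be
// initialized):
//
//   T* raw = ...;                      // must point into the glued core pools
//   CompressedPointer<T> c(raw);       // stored as a 32-bit value
//   static_assert(sizeof(c) == sizeof(uint32_t));
//   T* back = c.get();                 // decompresses; back == raw
//   CompressedPointer<T> none(nullptr);
//   PA_DCHECK(!none);                  // nullptr compresses to 0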
template <typename T>
PA_ALWAYS_INLINE constexpr void swap(CompressedPointer<T>& a,
CompressedPointer<T>& b) {
a.swap(b);
}
// operators==.
template <typename T, typename U>
PA_ALWAYS_INLINE bool operator==(CompressedPointer<T> a,
CompressedPointer<U> b) {
if constexpr (internal::IsDecayedSame<T, U>) {
// When pointers have the same type modulo constness, simply compare
// compressed values.
return a.GetAsIntegral() == b.GetAsIntegral();
} else {
// When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here.
return a.get() == b.get();
}
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a == static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(T* a, CompressedPointer<U> b) {
return b == a;
}
template <typename T>
PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a,
std::nullptr_t) {
return !a.is_nonnull();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
CompressedPointer<U> b) {
return b == nullptr;
}
// operators!=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
CompressedPointer<U> b) {
return !(a == b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a != static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(T* a, CompressedPointer<U> b) {
return b != a;
}
template <typename T>
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
std::nullptr_t) {
return a.is_nonnull();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
CompressedPointer<U> b) {
return b != nullptr;
}
// operators<.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a,
CompressedPointer<U> b) {
if constexpr (internal::IsDecayedSame<T, U>) {
// When pointers have the same type modulo constness, simply compare
// compressed values.
return a.GetAsIntegral() < b.GetAsIntegral();
} else {
// When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here.
return a.get() < b.get();
}
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a < static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(T* a, CompressedPointer<U> b) {
// Do compression, since it is less expensive.
return static_cast<CompressedPointer<T>>(a) < b;
}
// operators<=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a,
CompressedPointer<U> b) {
if constexpr (internal::IsDecayedSame<T, U>) {
// When pointers have the same type modulo constness, simply compare
// compressed values.
return a.GetAsIntegral() <= b.GetAsIntegral();
} else {
// When the types are different, compare decompressed pointers, because the
// pointers may need to be adjusted.
// TODO(1376980): Avoid decompression here.
return a.get() <= b.get();
}
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a <= static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(T* a, CompressedPointer<U> b) {
// Do compression, since it is less expensive.
return static_cast<CompressedPointer<T>>(a) <= b;
}
// operators>.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a,
CompressedPointer<U> b) {
return !(a <= b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a > static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(T* a, CompressedPointer<U> b) {
// Do compression, since it is less expensive.
return static_cast<CompressedPointer<T>>(a) > b;
}
// operators>=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a,
CompressedPointer<U> b) {
return !(a < b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a, U* b) {
// Do compression, since it is less expensive.
return a >= static_cast<CompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer<U> b) {
// Do compression, since it is less expensive.
return static_cast<CompressedPointer<T>>(a) >= b;
}
#endif // defined(PA_POINTER_COMPRESSION)
// Simple wrapper over the raw pointer.
template <typename T>
class PA_TRIVIAL_ABI UncompressedPointer final {
public:
PA_ALWAYS_INLINE constexpr UncompressedPointer() = default;
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(T* ptr) : ptr_(ptr) {}
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(std::nullptr_t)
: ptr_(nullptr) {}
PA_ALWAYS_INLINE constexpr UncompressedPointer(const UncompressedPointer&) =
default;
PA_ALWAYS_INLINE constexpr UncompressedPointer(
UncompressedPointer&& other) noexcept = default;
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer(
const UncompressedPointer<U>& other)
: ptr_(other.ptr_) {}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer(
UncompressedPointer<U>&& other) noexcept
: ptr_(std::move(other.ptr_)) {}
~UncompressedPointer() = default;
PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
const UncompressedPointer&) = default;
PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
UncompressedPointer&& other) noexcept = default;
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
const UncompressedPointer<U>& other) {
ptr_ = other.ptr_;
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
UncompressedPointer<U>&& other) noexcept {
ptr_ = std::move(other.ptr_);
return *this;
}
PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(std::nullptr_t) {
ptr_ = nullptr;
return *this;
}
PA_ALWAYS_INLINE constexpr T* get() const { return ptr_; }
PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return ptr_; }
PA_ALWAYS_INLINE constexpr explicit operator bool() const {
return is_nonnull();
}
template <typename U = T,
std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
PA_ALWAYS_INLINE constexpr U& operator*() const {
PA_DCHECK(is_nonnull());
return *get();
}
PA_ALWAYS_INLINE constexpr T* operator->() const {
PA_DCHECK(is_nonnull());
return get();
}
PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& other) {
std::swap(ptr_, other.ptr_);
}
private:
template <typename>
friend class UncompressedPointer;
T* ptr_;
};
template <typename T>
PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer<T>& a,
UncompressedPointer<T>& b) {
a.swap(b);
}
// operators==.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return a.get() == b.get();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a, U* b) {
return a == static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(T* a, UncompressedPointer<U> b) {
return b == a;
}
template <typename T>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
std::nullptr_t) {
return !a.is_nonnull();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
UncompressedPointer<U> b) {
return b == nullptr;
}
// operators!=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return !(a == b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a, U* b) {
return a != static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(T* a, UncompressedPointer<U> b) {
return b != a;
}
template <typename T>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
std::nullptr_t) {
return a.is_nonnull();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
UncompressedPointer<U> b) {
return b != nullptr;
}
// operators<.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return a.get() < b.get();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a, U* b) {
return a < static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(T* a, UncompressedPointer<U> b) {
return static_cast<UncompressedPointer<T>>(a) < b;
}
// operators<=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return a.get() <= b.get();
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a, U* b) {
return a <= static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(T* a, UncompressedPointer<U> b) {
return static_cast<UncompressedPointer<T>>(a) <= b;
}
// operators>.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return !(a <= b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a, U* b) {
return a > static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(T* a, UncompressedPointer<U> b) {
return static_cast<UncompressedPointer<T>>(a) > b;
}
// operators>=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a,
UncompressedPointer<U> b) {
return !(a < b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a, U* b) {
return a >= static_cast<UncompressedPointer<U>>(b);
}
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer<U> b) {
return static_cast<UncompressedPointer<T>>(a) >= b;
}
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_


@ -0,0 +1,75 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
namespace partition_alloc {
namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
[](uintptr_t) {};
bool g_unretained_dangling_raw_ptr_check_enabled = false;
} // namespace
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
PA_DCHECK(g_dangling_raw_ptr_detected_fn);
return g_dangling_raw_ptr_detected_fn;
}
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn() {
PA_DCHECK(g_dangling_raw_ptr_released_fn);
return g_dangling_raw_ptr_released_fn;
}
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
PA_DCHECK(fn);
g_dangling_raw_ptr_detected_fn = fn;
}
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
PA_DCHECK(fn);
g_dangling_raw_ptr_released_fn = fn;
}
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
return g_unretained_dangling_raw_ptr_detected_fn;
}
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
PA_DCHECK(fn);
g_unretained_dangling_raw_ptr_detected_fn = fn;
}
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
bool old = g_unretained_dangling_raw_ptr_check_enabled;
g_unretained_dangling_raw_ptr_check_enabled = enabled;
return old;
}
namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
g_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
g_dangling_raw_ptr_released_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id) {
g_unretained_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled() {
return g_unretained_dangling_raw_ptr_check_enabled;
}
} // namespace internal
} // namespace partition_alloc


@ -0,0 +1,67 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
// When compiled with the build flag `enable_dangling_raw_ptr_checks`, dangling
// raw_ptr occurrences are reported. Their handling can be configured here.
//
// Purpose of this level of indirection:
// - Ease testing.
// - Keep partition_alloc/ independent from base/. In most cases, when a
// dangling raw_ptr is detected/released, this involves recording a
// base::debug::StackTrace, which isn't desirable inside partition_alloc/.
// - Be able (potentially) to turn this feature on/off at runtime based on
// the dependent's flags.
namespace partition_alloc {
// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
// memory region and the allocator is asked to release it.
//
// It won't be called again with the same `id`, up until (potentially) a call to
// DanglingRawPtrReleased(`id`) is made.
//
// This function is called from within the allocator, and is not allowed to
// allocate memory.
using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);
// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region.
//
// This function is allowed to allocate memory.
using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);
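// Illustrative embedder setup (sketch only; the handler functions below are
// hypothetical):
//
//   void OnDanglingDetected(uintptr_t id) { /* e.g. capture a stack trace */ }
//   void OnDanglingReleased(uintptr_t id) { /* e.g. report, keyed by |id| */ }
//
//   partition_alloc::SetDanglingRawPtrDetectedFn(&OnDanglingDetected);
//   partition_alloc::SetDanglingRawPtrReleasedFn(&OnDanglingReleased);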
namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled();
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_


@ -0,0 +1,23 @@
digraph G {
graph[bgcolor=transparent]
node[shape=box,style="filled,rounded",color=deepskyblue]
subgraph cluster_tc {
label = "Thread Cache"
rankdir = LR
{rank=same;TLS1,TLS2,TLSn}
TLS1->TLS2[style=invisible,dir=none]
TLS2->TLSn[style=dotted,dir=none]
}
subgraph cluster_central {
label = "Central Allocator (per-partition lock)"
fast[label="slot span freelists (fast path)"]
slow[label="slot span management (slow path)"]
# Forces slow path node beneath fast path node.
fast->slow[style=invisible,dir=none]
}
# Forces thread-external subgraph beneath thread cache subgraph.
TLS2->fast[style=invisible,dir=none]
}

Binary file not shown.


@ -0,0 +1,95 @@
digraph G {
graph[bgcolor=transparent]
node[shape=plaintext]
edge[style=dashed]
invisible_a[label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR>
<TD PORT="red" WIDTH="100"></TD>
<TD PORT="green" WIDTH="20"></TD>
<TD PORT="blue" WIDTH="40"></TD>
<TD PORT="gold" WIDTH="300"></TD>
<TD PORT="pink" WIDTH="60"></TD>
</TR>
</TABLE>
>]
superpage[xlabel="Super Page",label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
<TR>
<!-- Head Partition Page -->
<TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
<TD PORT="metadata"></TD>
<TD BGCOLOR="darkgrey" WIDTH="18"></TD>
<!-- Bitmaps -->
<TD WIDTH="100">Bitmaps(?)</TD>
<!-- Several Slot Spans -->
<TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
<TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
<TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
<TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
<TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
<TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
<TD WIDTH="79">...</TD>
<!-- Tail Partition Page -->
<TD BGCOLOR="darkgrey" WIDTH="39"></TD>
</TR>
</TABLE>
>]
invisible_b[label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR>
<TD PORT="green" WIDTH="30"></TD>
<TD PORT="blue" WIDTH="60"></TD>
<TD PORT="gold" WIDTH="180"></TD>
<TD PORT="red" WIDTH="90"></TD>
<TD PORT="pink" WIDTH="90"></TD>
</TR>
</TABLE>
>]
metadata_page[xlabel="Metadata",label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<!-- Guard Page Metadata -->
<TD BGCOLOR="darkgrey"> </TD>
<!-- Bitmaps Offset -->
<TD> B? </TD>
<!-- Red Slot Span Metadata -->
<TD BGCOLOR="crimson">v</TD>
<TD BGCOLOR="crimson">+</TD>
<TD BGCOLOR="crimson">+</TD>
<!-- Green Slot Span Metadata -->
<TD BGCOLOR="palegreen">v</TD>
<!-- Blue Slot Span Metadata -->
<TD BGCOLOR="cornflowerblue">v</TD>
<TD BGCOLOR="cornflowerblue">+</TD>
<!-- Gold Slot Span Metadata -->
<TD BGCOLOR="gold">v</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<!-- Red Slot Span Metadata -->
<TD BGCOLOR="crimson">v</TD>
<TD BGCOLOR="crimson">+</TD>
<TD BGCOLOR="crimson">+</TD>
<!-- Pink Slot Span Metadata -->
<TD BGCOLOR="deeppink">v</TD>
<!-- etc. -->
<TD WIDTH="64">...</TD>
<!-- Guard Page Metadata -->
<TD BGCOLOR="darkgrey"> </TD>
</TR>
</TABLE>
>]
invisible_a:red->superpage:red->superpage:red2[color=crimson]
superpage:red2->invisible_b:red[color=crimson]
invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]
superpage:metadata->metadata_page[style="",arrowhead=odot]
}

Binary file not shown.


@ -0,0 +1,97 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace partition_alloc::internal {
#if defined(PA_THREAD_CACHE_SUPPORTED)
namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been
// disabled.
if (!root || !root->flags.with_thread_cache)
return;
ThreadCacheRegistry::Instance().PurgeAll();
root->flags.with_thread_cache = false;
// Doesn't destroy the thread cache object(s). For background threads, they
// will be collected (and free cached memory) at thread destruction
// time. For the main thread, we leak it.
}
void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) {
if (!root)
return;
root->flags.with_thread_cache = true;
}
#if BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
void DisablePartitionAllocThreadCacheForProcess() {
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator)
DisableThreadCacheForRootIfEnabled(aligned_allocator);
DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
}
#endif // BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
} // namespace
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)
#if BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
DisablePartitionAllocThreadCacheForProcess();
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif // BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
ThreadCache::SwapForTesting(root);
EnablePartitionAllocThreadCacheForRootIfDisabled(root);
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root);
#if BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
ThreadCache::SwapForTesting(regular_allocator);
#else
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}
ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCache* thread_cache = ThreadCache::Get();
if (ThreadCache::IsValid(thread_cache))
return thread_cache->thread_alloc_stats();
return {};
}
} // namespace partition_alloc::internal


@ -0,0 +1,29 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
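// Illustrative test-only usage (sketch; assumes the test is single-threaded
// and |test_root| outlives the swap):
//
//   SwapOutProcessThreadCacheForTesting(&test_root);
//   // ... allocations on this thread now hit |test_root|'s thread cache ...
//   SwapInProcessThreadCacheForTesting(&test_root);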
// Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread();
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_


@ -0,0 +1,87 @@
# Chrome-External Builds
Work is ongoing to make PartitionAlloc a standalone library. The
standalone repository for PartitionAlloc is hosted
[here][standalone-PA-repo].
## GN Args
External clients mainly need to set these six GN args:
``` none
# These are blocked on PA-E and `raw_ptr.h` and can never be true until
# we make them part of the standalone PA distribution.
use_partition_alloc_as_malloc_default = false
enable_mte_checked_ptr_support_default = false
enable_backup_ref_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
```
PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`.
In addition, something must provide `build_with_chromium = false` to
the PA build system.
## `use_partition_alloc`
The `use_partition_alloc` GN arg, described in
[`build_config.md`](./build_config.md), provides a GN-level seam that
embedders
1. can set in their GN args and
2. should observe in their GN recipes to conditionally pull in
PartitionAlloc.
I.e., if you have any reason to disable PartitionAlloc, you should do so
with this GN arg. Avoid pulling in PartitionAlloc headers when the
corresponding buildflag is false.
Setting `use_partition_alloc` false will also implicitly disable other
features, e.g. nixing the compilation of BackupRefPtr as the
implementation of `raw_ptr<T>`.
## Periodic Memory Reduction Routines
PartitionAlloc provides APIs to
* reclaim memory (see [memory\_reclaimer.h](./memory_reclaimer.h)) and
* purge thread caches (see [thread\_cache.h](./thread_cache.h)).
Both of these must be called by the embedder external to PartitionAlloc.
PA provides neither an event loop nor timers of its own, delegating this
to its clients.
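For illustration, an embedder might drive both routines from a timer it owns.
The namespaces and the reclaim entry point below are assumptions based on
`memory_reclaimer.h` and `thread_cache.h`, not a prescribed API:
```cpp
// Sketch of an embedder-side periodic task (assumed entry points).
void OnMemoryReductionTimerFired() {
  // Decommit memory that partitions no longer need.
  ::partition_alloc::MemoryReclaimer::Instance()->ReclaimNormal();
  // Flush per-thread caches back to the central allocator.
  ::partition_alloc::ThreadCacheRegistry::Instance().PurgeAll();
}
```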
## Build Considerations
External clients create constraints on PartitionAlloc's implementation.
### C++17
PartitionAlloc targets C++17. This is aligned with our first external
client, PDFium, and may be further constrained by other clients. These
impositions prevent us from moving in lockstep with Chrome's target
C++ version.
We do not even have guarantees of backported future features, e.g.
C++20's designated initializers. Therefore, these cannot ship with
PartitionAlloc.
### MSVC Support
PDFium supports MSVC. PartitionAlloc will have to match it.
### MSVC Constraint: No Inline Assembly
MSVC's syntax for `asm` blocks differs from the one widely adopted in
parts of Chrome. But more generally,
[MSVC doesn't support inline assembly on ARM and x64 processors][msvc-inline-assembly].
Assembly blocks should be gated behind compiler-specific flags and
replaced with intrinsics in the presence of `COMPILER_MSVC` (absent
`__clang__`).
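A minimal sketch of that pattern (the helper below is hypothetical;
`COMPILER_MSVC` comes from Chromium's compiler detection):
```cpp
#include <emmintrin.h>  // _mm_pause() on x86/x64.

// Hypothetical spin-wait hint, gated as described above (x86/x64 only).
inline void CpuRelax() {
#if defined(COMPILER_MSVC) && !defined(__clang__)
  _mm_pause();                         // MSVC: intrinsic, no inline asm.
#else
  asm volatile("pause" ::: "memory");  // Clang/GCC.
#endif
}
```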
[standalone-PA-repo]: https://chromium.googlesource.com/chromium/src/base/allocator/partition_allocator.git
[msvc-inline-assembly]: https://docs.microsoft.com/en-us/cpp/assembler/inline/inline-assembler?view=msvc-170


@ -0,0 +1,140 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_
#include <climits>
#include <cstdint>
#include <utility>
#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if BUILDFLAG(USE_FREESLOT_BITMAP)
namespace partition_alloc::internal {
PA_ALWAYS_INLINE uintptr_t GetFreeSlotBitmapAddressForPointer(uintptr_t ptr) {
uintptr_t super_page = ptr & kSuperPageBaseMask;
return SuperPageFreeSlotBitmapAddr(super_page);
}
// Calculates the cell address and the offset inside the cell corresponding to
// the |slot_start|.
PA_ALWAYS_INLINE std::pair<FreeSlotBitmapCellType*, size_t>
GetFreeSlotBitmapCellPtrAndBitIndex(uintptr_t slot_start) {
uintptr_t slot_superpage_offset = slot_start & kSuperPageOffsetMask;
uintptr_t superpage_bitmap_start =
GetFreeSlotBitmapAddressForPointer(slot_start);
uintptr_t cell_addr = base::bits::AlignDown(
superpage_bitmap_start +
(slot_superpage_offset / kSmallestBucket) / CHAR_BIT,
sizeof(FreeSlotBitmapCellType));
PA_DCHECK(cell_addr < superpage_bitmap_start + kFreeSlotBitmapSize);
size_t bit_index =
(slot_superpage_offset / kSmallestBucket) & kFreeSlotBitmapOffsetMask;
PA_DCHECK(bit_index < kFreeSlotBitmapBitsPerCell);
return {reinterpret_cast<FreeSlotBitmapCellType*>(cell_addr), bit_index};
}
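// Worked example (illustrative; assumes kSmallestBucket == 8 and 64-bit
// cells): a slot whose super-page offset is 0x12348 corresponds to bit
// 0x12348 / 8 = 9321 of the bitmap, i.e. cell 9321 / 64 = 145, bit index
// 9321 % 64 = 41 within that cell.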
// This bitmap marks used slots as 0 and free slots as 1, because we want all
// slots to read as "used" by default, which prevents allocating a used slot
// when its freelist entry has been overwritten. The state of the bitmap is
// expected to stay in sync with the freelist (i.e. a bit is set to 1 if and
// only if the slot is in the freelist).
PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithAOne(size_t n) {
return static_cast<FreeSlotBitmapCellType>(1) << n;
}
PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithTrailingOnes(size_t n) {
return (static_cast<FreeSlotBitmapCellType>(1) << n) -
static_cast<FreeSlotBitmapCellType>(1);
}
// Returns true if the bit corresponding to |slot_start| is used (= 0).
PA_ALWAYS_INLINE bool FreeSlotBitmapSlotIsUsed(uintptr_t slot_start) {
auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
return (*cell & CellWithAOne(bit_index)) == 0;
}
// Mark the bit corresponding to |slot_start| as used (= 0).
PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsUsed(uintptr_t slot_start) {
PA_CHECK(!FreeSlotBitmapSlotIsUsed(slot_start));
auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
*cell &= ~CellWithAOne(bit_index);
}
// Mark the bit corresponding to |slot_start| as free (= 1).
PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsFree(uintptr_t slot_start) {
PA_CHECK(FreeSlotBitmapSlotIsUsed(slot_start));
auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
*cell |= CellWithAOne(bit_index);
}
// Resets (= set to 0) all the bits corresponding to the slot-start addresses
// within [begin_addr, end_addr). |begin_addr| has to be the beginning of a
// slot, but |end_addr| does not.
PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
uintptr_t end_addr,
uintptr_t slot_size) {
PA_DCHECK(begin_addr <= end_addr);
// |end_addr| has to be kSmallestBucket-aligned.
PA_DCHECK((end_addr & (kSmallestBucket - 1)) == 0u);
for (uintptr_t slot_start = begin_addr; slot_start < end_addr;
slot_start += slot_size) {
auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
*cell &= ~CellWithAOne(bit_index);
}
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Checks if the cells that are meant to contain only unset bits are really 0.
auto [begin_cell, begin_bit_index] =
GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
auto [end_cell, end_bit_index] =
GetFreeSlotBitmapCellPtrAndBitIndex(end_addr);
// The bits that should be marked to 0 are |begin_bit_index|th bit of
// |begin_cell| to |end_bit_index - 1|th bit of |end_cell|. We verify all the
// bits are set to 0 for the cells between [begin_cell + 1, end_cell). For the
// |begin_cell| and |end_cell|, we have to handle them separately to only
// check the partial bits.
// | begin_cell | |...| | end_cell |
// |11...100...0|0...0|...|0...0|0...01...1|
// ^ ^
// | |
// begin_addr end_addr
if (begin_cell == end_cell) {
PA_DCHECK((*begin_cell & (~CellWithTrailingOnes(begin_bit_index) &
CellWithTrailingOnes(end_bit_index))) == 0u);
}
if (begin_bit_index != 0) {
// Checks the bits between [begin_bit_index, kFreeSlotBitmapBitsPerCell) in
// the begin_cell are 0
PA_DCHECK((*begin_cell & ~CellWithTrailingOnes(begin_bit_index)) == 0u);
++begin_cell;
}
if (end_bit_index != 0) {
// Checks the bits between [0, end_bit_index) in the end_cell are 0
PA_DCHECK((*end_cell & CellWithTrailingOnes(end_bit_index)) == 0u);
}
for (FreeSlotBitmapCellType* cell = begin_cell; cell != end_cell; ++cell) {
PA_DCHECK(*cell == 0u);
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
}
} // namespace partition_alloc::internal
#endif // BUILDFLAG(USE_FREESLOT_BITMAP)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_H_


@ -0,0 +1,63 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
namespace partition_alloc::internal {
using FreeSlotBitmapCellType = uint64_t;
constexpr size_t kFreeSlotBitmapBitsPerCell =
sizeof(FreeSlotBitmapCellType) * CHAR_BIT;
constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
// The number of bits necessary for the bitmap is equal to the maximum number
// of slots in a super page; kFreeSlotBitmapSize expresses that bit count in
// bytes.
constexpr size_t kFreeSlotBitmapSize =
(kSuperPageSize / kSmallestBucket) / CHAR_BIT;
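// Worked example (illustrative; assumes a 2 MiB super page and
// kSmallestBucket == 8): 2 MiB / 8 = 262,144 potential slots, so the bitmap
// occupies 262,144 bits = 32 KiB per super page.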
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
#else
return 0;
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
#else
return 0;
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerFreeSlotBitmap() {
return ReservedFreeSlotBitmapSize() / PartitionPageSize();
}
#if BUILDFLAG(USE_FREESLOT_BITMAP)
PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
}
#endif
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_


@ -0,0 +1,178 @@
# Glossary
This page describes some core terminology used in PartitionAlloc.
A weak attempt is made to present terms "in conceptual order" s.t.
each term depends mainly upon previously defined ones.
## Top-Level Terms
* **Partition**: A heap that is separated and protected both from other
partitions and from non-PartitionAlloc memory. Each partition holds
multiple buckets.
*** promo
**NOTE**: In code (and comments), "partition," "root," and even
"allocator" are all conceptually the same thing.
***
* **Bucket**: A collection of regions in a partition that contains
similar-sized objects. For example, one bucket may hold objects of
size (224,&nbsp;256], another (256,&nbsp;320], etc. Bucket size
brackets are geometrically spaced,
[going up to `kMaxBucketed`][max-bucket-comment].
* **Normal Bucket**: Any bucket whose size ceiling does not exceed
`kMaxBucketed`. This is the common case in PartitionAlloc, and
the "normal" modifier is often dropped in casual reference.
* **Direct Map (Bucket)**: Any allocation whose size exceeds `kMaxBucketed`.
Buckets consist of slot spans, organized as linked lists (see below).
## Pages
* **System Page**: A memory page defined by the CPU/OS. Commonly
referred to as a "virtual page" in other contexts. This is typically
4KiB, but it can be larger. PartitionAlloc supports up to 64KiB,
though this constant isn't always known at compile time (depending
on the OS).
* **Partition Page**: The most common granularity used by
PartitionAlloc. Consists of exactly 4 system pages.
* **Super Page**: A 2MiB region, aligned on a 2MiB boundary. Not to
be confused with OS-level terms like "large page" or "huge page",
which are also commonly 2MiB. These have to be fully committed /
uncommitted in memory, whereas super pages can be partially committed
with system page granularity.
* **Extent**: An extent is a run of consecutive super pages (belonging
to a single partition). Extents are to super pages what slot spans are
to slots (see below).
## Slots and Spans
* **Slot**: An indivisible allocation unit. Slot sizes are tied to
buckets. For example, each allocation that falls into the bucket
(224,&nbsp;256] would be satisfied with a slot of size 256. This
applies only to normal buckets, not to direct map.
* **Slot Span**: A run of same-sized slots that are contiguous in
memory. Slot span size is a multiple of partition page size, but it
isn't always a multiple of slot size, although we try hard for this
to be the case.
* **Small Bucket**: Allocations up to 4 partition pages. In these
cases, slot spans are always between 1 and 4 partition pages in
size. For each slot size, the slot span size is chosen to minimize the
number of pages used while keeping the rounding waste under a
reasonable limit.
* For example, for a slot size 96, 64B waste is deemed acceptable
when using a single partition page, but for slot size
384, the potential waste of 256B wouldn't be, so 3 partition pages
are used to achieve 0B waste.
* PartitionAlloc may avoid waste by lowering the number of committed
system pages compared to the number of reserved pages. For
example, for the slot size of 896B we'd use a slot span of 2
partition pages of 16KiB, i.e. 8 system pages of 4KiB, but commit
only up to 7, thus resulting in perfect packing.
* **Single-Slot Span**: Allocations above 4 partition pages (but
&le;`kMaxBucketed`). These are served by slot spans that are each
guaranteed to hold exactly one slot, which is where the name comes from.
* Fun fact: there are sizes &le;4 partition pages that result in a
slot span having exactly 1 slot, but nonetheless they're still
classified as small buckets. The reason is that single-slot spans
are often handled by a different code path, and that distinction
is made purely based on slot size, for simplicity and efficiency.
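The slot-span sizing trade-off described in the *Small Bucket* bullet can be worked through numerically. The sketch below assumes 4 KiB system pages (hence 16 KiB partition pages) and is not the real span-selection code; it only reproduces the 96 B and 384 B arithmetic.
```c++
#include <cstddef>
#include <cstdio>

constexpr size_t kExamplePartitionPageSize = 16 * 1024;

// Bytes at the end of a span that cannot form a whole slot.
size_t RoundingWaste(size_t slot_size, size_t partition_pages) {
  return (partition_pages * kExamplePartitionPageSize) % slot_size;
}

int main() {
  std::printf("96B slots,  1 page : %zu B wasted\n", RoundingWaste(96, 1));   // 64
  std::printf("384B slots, 1 page : %zu B wasted\n", RoundingWaste(384, 1));  // 256
  std::printf("384B slots, 3 pages: %zu B wasted\n", RoundingWaste(384, 3));  // 0
}
```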
## Other Terms
* **Object**: A chunk of memory returned to the allocating invoker
of the size requested. It doesn't have to span the entire slot,
nor does it have to begin at the slot start. This term is commonly
used as a parameter name in PartitionAlloc code, as opposed to
`slot_start`.
* **Thread Cache**: A [thread-local structure][pa-thread-cache] that
holds some not-too-large memory chunks, ready to be allocated. This
speeds up in-thread allocation by reducing a lock hold to a
thread-local storage lookup, improving cache locality.
* **Pool**: A large (and contiguous on 64-bit) virtual address region, housing
super pages, etc. from which PartitionAlloc services allocations. The
primary purpose of the pools is to provide a fast answer to the
question, "Did PartitionAlloc allocate the memory for this pointer
from this pool?" with a single bit-masking operation (sketched at the end of
this section).
* The regular pool is a general purpose pool that contains allocations that
aren't protected by BackupRefPtr.
* The BRP pool contains all allocations protected by BackupRefPtr.
* [64-bit only] The configurable pool is named generically, because its
primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime,
providing a pre-existing mapping. Its allocations aren't protected by
BackupRefPtr.
* [64-bit only] The pkey pool returns memory tagged with a memory
protection key on supported platforms. Its primary user is [V8 CFI][v8-cfi].
*** promo
Pools are downgraded into a logical concept in 32-bit environments,
tracking a non-contiguous set of allocations using a bitmap.
***
* **Payload**: The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
other metadata (e.g. StarScan bitmaps) can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.
* **Allocation Fast Path**: A path taken during an allocation that is
considered fast. Usually means that an allocation request can be
immediately satisfied by grabbing a slot from the freelist of the
first active slot span in the bucket.
* **Allocation Slow Path**: Anything which is not fast (see above).
Can involve
* finding another active slot span in the list,
* provisioning more slots in a slot span,
* bringing back a free (or decommitted) slot span,
* allocating a new slot span, or even
* allocating a new super page.
*** aside
By "slow" we may mean something as simple as extra logic (`if`
statements etc.), or something as costly as system calls.
***
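The pool containment check mentioned in the *Pool* bullet can be sketched as follows. The pool size, alignment, and names are hypothetical, and the sketch assumes a 64-bit build; the real check is implemented elsewhere in PartitionAlloc, but the shape of the test is the same: one mask and one compare.
```c++
#include <cstdint>

// Assume a pool that is 16 GiB in size and 16 GiB-aligned (64-bit uintptr_t).
constexpr uintptr_t kExamplePoolSize = uintptr_t{16} << 30;
constexpr uintptr_t kExamplePoolBaseMask = ~(kExamplePoolSize - 1);

// |pool_base| is the aligned start of the pool reservation.
bool IsInExamplePool(uintptr_t pool_base, uintptr_t address) {
  // Masking off the low bits yields the address's pool-aligned base; if it
  // equals the pool's base, the address was carved out of this pool.
  return (address & kExamplePoolBaseMask) == pool_base;
}
```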
## Legacy Terms
These terms are (mostly) deprecated and should not be used. They are
surfaced here to provide a ready reference for readers coming from
older design documents or documentation.
* **GigaCage**: A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which nearly all allocations
are taken. _Pools_ have overtaken GigaCage in conceptual importance,
and so there is less need today to refer to "GigaCage" or the
"cage." This is especially true given the V8 Sandbox and the
configurable pool (see above).
## PartitionAlloc-Everywhere
Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
It was invoked explicitly, by calling PartitionAlloc APIs directly.
PartitionAlloc-Everywhere is the name of the project that brought PartitionAlloc
to the entire-ish codebase (exclusions apply). This was done by intercepting
`malloc()`, `free()`, `realloc()`, `posix_memalign()`, etc. and
routing them into PartitionAlloc. The shim located in
`base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h` is
responsible for intercepting. For more details, see
[base/allocator/README.md](../../../base/allocator/README.md).
A special, catch-it-all *Malloc* partition has been created for the intercepted
`malloc()` et al. This keeps it isolated from the already existing Blink
partitions. The only exception is Blink's *FastMalloc* partition, which was
also catch-it-all in nature, so it is perfectly fine to merge the two and
thereby minimize fragmentation.
As of 2022, PartitionAlloc-Everywhere is supported on
* Windows 32- and 64-bit
* Linux
* Android 32- and 64-bit
* macOS
* Fuchsia
[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#
[v8-cfi]: https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/preview#

View File

@ -0,0 +1,134 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/gwp_asan_support.h"
#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
#include "base/allocator/partition_allocator/freeslot_bitmap_constants.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "build/build_config.h"
namespace partition_alloc {
// static
void* GwpAsanSupport::MapRegion(size_t slot_count,
std::vector<uint16_t>& free_list) {
PA_CHECK(slot_count > 0);
constexpr PartitionOptions kConfig{
PartitionOptions::AlignedAlloc::kDisallowed,
PartitionOptions::ThreadCache::kDisabled,
PartitionOptions::Quarantine::kDisallowed,
PartitionOptions::Cookie::kDisallowed,
PartitionOptions::BackupRefPtr::kEnabled,
PartitionOptions::BackupRefPtrZapping::kDisabled,
PartitionOptions::UseConfigurablePool::kNo,
};
static internal::base::NoDestructor<ThreadSafePartitionRoot> root(kConfig);
const size_t kSlotSize = 2 * internal::SystemPageSize();
uint16_t bucket_index =
PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
kSlotSize, root->GetBucketDistribution());
auto* bucket = root->buckets + bucket_index;
const size_t kSuperPagePayloadStartOffset =
internal::SuperPagePayloadStartOffset(
/* is_managed_by_normal_buckets = */ true,
/* with_quarantine = */ false);
PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
kSuperPagePayloadStartOffset;
const size_t kSuperPageGwpAsanSlotAreaEndOffset =
internal::SuperPagePayloadEndOffset();
const size_t kSuperPageGwpAsanSlotAreaSize =
kSuperPageGwpAsanSlotAreaEndOffset - kSuperPageGwpAsanSlotAreaBeginOffset;
const size_t kSlotsPerSlotSpan = bucket->get_bytes_per_span() / kSlotSize;
const size_t kSlotsPerSuperPage =
kSuperPageGwpAsanSlotAreaSize / (kSlotsPerSlotSpan * kSlotSize);
size_t super_page_count = 1 + ((slot_count - 1) / kSlotsPerSuperPage);
PA_CHECK(super_page_count <=
std::numeric_limits<size_t>::max() / kSuperPageSize);
uintptr_t super_page_span_start;
{
internal::ScopedGuard locker{root->lock_};
super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
root.get(), super_page_count, 0);
if (!super_page_span_start)
return nullptr;
#if defined(ARCH_CPU_64_BITS)
// Mapping the GWP-ASan region into the lower 32 bits of the address space
// makes it much more likely that a bad pointer dereference points into
// our region and triggers a false positive report. We rely on the fact
// that PA address pools are never allocated in the first 4GB due to
// their alignment requirements.
PA_CHECK(super_page_span_start >= (1ULL << 32));
#endif // defined(ARCH_CPU_64_BITS)
uintptr_t super_page_span_end =
super_page_span_start + super_page_count * kSuperPageSize;
PA_CHECK(super_page_span_start < super_page_span_end);
for (uintptr_t super_page = super_page_span_start;
super_page < super_page_span_end; super_page += kSuperPageSize) {
auto* page_metadata =
internal::PartitionSuperPageToMetadataArea<internal::ThreadSafe>(
super_page);
// Index 0 is invalid because it is the super page extent metadata.
for (size_t partition_page_idx =
1 + internal::NumPartitionPagesPerFreeSlotBitmap();
partition_page_idx + bucket->get_pages_per_slot_span() <
internal::NumPartitionPagesPerSuperPage();
partition_page_idx += bucket->get_pages_per_slot_span()) {
auto* slot_span_metadata =
&page_metadata[partition_page_idx].slot_span_metadata;
bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
auto slot_span_start =
internal::SlotSpanMetadata<internal::ThreadSafe>::ToSlotSpanStart(
slot_span_metadata);
for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
auto slot_start = slot_span_start + slot_idx * kSlotSize;
internal::PartitionRefCountPointer(slot_start)->InitalizeForGwpAsan();
size_t global_slot_idx = (slot_start - super_page_span_start -
kSuperPageGwpAsanSlotAreaBeginOffset) /
kSlotSize;
PA_DCHECK(global_slot_idx < std::numeric_limits<uint16_t>::max());
free_list.push_back(global_slot_idx);
if (free_list.size() == slot_count) {
return reinterpret_cast<void*>(
super_page_span_start + kSuperPageGwpAsanSlotAreaBeginOffset -
internal::SystemPageSize()); // Depends on the PA guard region
// in front of the super page
// payload area.
}
}
}
}
}
PA_NOTREACHED();
return nullptr;
}
// static
bool GwpAsanSupport::CanReuse(uintptr_t slot_start) {
return internal::PartitionRefCountPointer(slot_start)->CanBeReusedByGwpAsan();
}
} // namespace partition_alloc
#endif // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

View File

@ -0,0 +1,120 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
#include <cstddef>
#include <cstdint>
#include <vector>
namespace partition_alloc {
// This class allows GWP-ASan allocations to be backed by PartitionAlloc and,
// consequently, protected by MiraclePtr.
//
// GWP-ASan mainly operates at the system memory page granularity. During
// process startup, it reserves a certain number of consecutive system pages.
//
// The standard layout is as follows:
//
// +-------------------+--------
// | | ▲ ▲
// | system page 0 |(a) (c)
// | | ▼ ▼
// +-------------------+--------
// | | ▲ ▲
// | system page 1 |(b) |
// | | ▼ |
// +-------------------+--- (d) (a) inaccessible
// | | ▲ | (b) accessible
// | system page 2 |(a) | (c) initial guard page
// | | ▼ ▼ (d) allocation slot
// +-------------------+--------
// | | ▲ ▲
// | system page 3 |(b) |
// | | ▼ |
// +-------------------+--- (d)
// | | ▲ |
// | system page 4 |(a) |
// | | ▼ ▼
// |-------------------|--------
// | | ▲ ▲
// | ... |(a) (d)
//
// Unfortunately, PartitionAlloc can't provide GWP-ASan an arbitrary number of
// consecutive allocation slots. Allocations need to be grouped into 2MB super
// pages so that the allocation metadata can be easily located.
//
// Below is the new layout:
//
// +-----------------------------------
// | | ▲ ▲
// | system page 0 | | |
// | | | |
// +-------------------+ | |
// | | | |
// | ... | (e) |
// | | | |
// +-------------------+------- | |
// | | ▲ ▲ | |
// | system page k-1 |(a) (c) | |
// | | ▼ ▼ ▼ |
// +-------------------+----------- (f)
// | | ▲ ▲ |
// | system page k |(b) | |
// | | ▼ | |
// +-------------------+--- (d) |
// | | ▲ | |
// | system page k+1 |(a) | |
// | | ▼ ▼ |
// +-------------------+----------- |
// | | | (a) inaccessible
// | ... | | (b) accessible
// | | ▼ (c) initial guard page
// +----------------------------------- (d) allocation slot
// | | ▲ ▲ (e) super page metadata
// | system page m | | | (f) super page
// | | | | (g) pseudo allocation slot
// +-------------------+------- | |
// | | ▲ | |
// | ... | | (e) |
// | | | | |
// +-------------------+--- (g) | |
// | | ▲ | | |
// | system page m+k-1 |(a) | | |
// | | ▼ ▼ ▼ |
// +-------------------+----------- (f)
// | | ▲ ▲ |
// | system page m+k |(b) | |
// | | ▼ | |
// +-------------------+--- (d) |
// | | ▲ | |
// | system page m+k+1 |(a) | |
// | | ▼ ▼ |
// +-------------------+----------- |
// | | |
// | ... | |
// | | ▼
// +-------------------+---------------
//
// This means some allocation slots will be reserved to hold PA
// metadata. We exclude these pseudo slots from the GWP-ASan free list so that
// they are never used for anything other than storing the metadata.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) GwpAsanSupport {
public:
static void* MapRegion(size_t slot_count, std::vector<uint16_t>& free_list);
static bool CanReuse(uintptr_t slot_start);
};
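// Example usage (a sketch, not part of the original header): a GWP-ASan-style
// client reserving a region of slots. The slot count is an arbitrary example
// value.
//
//   std::vector<uint16_t> free_list;
//   void* region = partition_alloc::GwpAsanSupport::MapRegion(
//       /*slot_count=*/128, free_list);
//   // On success, |free_list| holds the indices of the usable (non-pseudo)
//   // slots within the returned region.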
} // namespace partition_alloc
#endif // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_GWP_ASAN_SUPPORT_H_

View File

@ -0,0 +1,100 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#if BUILDFLAG(STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif // BUILDFLAG(STARSCAN)
// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
// cause significant jank.
#define PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM 0
namespace partition_alloc {
// static
MemoryReclaimer* MemoryReclaimer::Instance() {
static internal::base::NoDestructor<MemoryReclaimer> instance;
return instance.get();
}
void MemoryReclaimer::RegisterPartition(PartitionRoot<>* partition) {
internal::ScopedGuard lock(lock_);
PA_DCHECK(partition);
auto it_and_whether_inserted = partitions_.insert(partition);
PA_DCHECK(it_and_whether_inserted.second);
}
void MemoryReclaimer::UnregisterPartition(
PartitionRoot<internal::ThreadSafe>* partition) {
internal::ScopedGuard lock(lock_);
PA_DCHECK(partition);
size_t erased_count = partitions_.erase(partition);
PA_DCHECK(erased_count == 1u);
}
MemoryReclaimer::MemoryReclaimer() = default;
MemoryReclaimer::~MemoryReclaimer() = default;
void MemoryReclaimer::ReclaimAll() {
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages |
PurgeFlags::kAggressiveReclaim;
Reclaim(kFlags);
}
void MemoryReclaimer::ReclaimNormal() {
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages;
Reclaim(kFlags);
}
void MemoryReclaimer::Reclaim(int flags) {
internal::ScopedGuard lock(
lock_); // Has to protect from concurrent (Un)Register calls.
// PCScan quarantines freed slots. Trigger the scan first to let it call
// FreeNoHooksImmediate on slots that pass the quarantine.
//
// In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
// so that the slots are actually freed. (This is done synchronously only for
// the current thread.)
//
// Lastly, decommit empty slot spans, and then try to discard unused pages at
// the end of the remaining active slots.
#if PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM && BUILDFLAG(STARSCAN)
{
using PCScan = internal::PCScan;
const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
? PCScan::InvocationMode::kForcedBlocking
: PCScan::InvocationMode::kBlocking;
PCScan::PerformScanIfNeeded(invocation_mode);
}
#endif
#if defined(PA_THREAD_CACHE_SUPPORTED)
// Don't completely empty the thread cache outside of low memory situations,
// as there is a periodic purge which makes sure that it doesn't take too much
// space.
if (flags & PurgeFlags::kAggressiveReclaim)
ThreadCacheRegistry::Instance().PurgeAll();
#endif
for (auto* partition : partitions_)
partition->PurgeMemory(flags);
}
void MemoryReclaimer::ResetForTesting() {
internal::ScopedGuard lock(lock_);
partitions_.clear();
}
} // namespace partition_alloc

View File

@ -0,0 +1,72 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#include <memory>
#include <set>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_lock.h"
namespace partition_alloc {
// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// PartitionAlloc users are responsible for scheduling and calling the
// reclamation methods with their own timers / event loops.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
public:
static MemoryReclaimer* Instance();
MemoryReclaimer(const MemoryReclaimer&) = delete;
MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;
// Internal. Do not use.
// Registers a partition to be tracked by the reclaimer.
void RegisterPartition(PartitionRoot<>* partition);
// Internal. Do not use.
// Unregisters a partition so that it is no longer tracked by the reclaimer.
void UnregisterPartition(PartitionRoot<>* partition);
// Triggers an explicit reclaim now to reclaim as much free memory as
// possible. The API callers need to invoke this method periodically
// if they want to use memory reclaimer.
// See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
void ReclaimNormal();
// Returns a recommended interval to invoke ReclaimNormal.
int64_t GetRecommendedReclaimIntervalInMicroseconds() {
return internal::base::Seconds(4).InMicroseconds();
}
// Triggers an explicit reclaim now, reclaiming all free memory.
void ReclaimAll();
private:
MemoryReclaimer();
~MemoryReclaimer();
// |flags| is an OR of PurgeFlags values.
void Reclaim(int flags);
void ReclaimAndReschedule();
void ResetForTesting();
internal::Lock lock_;
std::set<PartitionRoot<>*> partitions_ PA_GUARDED_BY(lock_);
friend class internal::base::NoDestructor<MemoryReclaimer>;
friend class MemoryReclaimerTest;
};
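// Example usage (a sketch, not part of the original header): an embedder
// drives ReclaimNormal() from its own timer / event loop, as the class comment
// above requires. ScheduleRepeating() is a hypothetical helper standing in for
// whatever scheduling primitive the embedder owns.
//
//   auto* reclaimer = ::partition_alloc::MemoryReclaimer::Instance();
//   ScheduleRepeating(
//       reclaimer->GetRecommendedReclaimIntervalInMicroseconds(),
//       [reclaimer] { reclaimer->ReclaimNormal(); });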
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

View File

@ -0,0 +1,81 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#include <stdlib.h>
#include <array>
#endif // BUILDFLAG(IS_WIN)
namespace partition_alloc {
size_t g_oom_size = 0U;
namespace internal {
// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
// TODO(crbug.com/1151236): Update to
// partition_alloc::internal::base::internal::OnNoMemoryInternal
PA_NOINLINE void OnNoMemoryInternal(size_t size) {
g_oom_size = size;
#if BUILDFLAG(IS_WIN)
// Kill the process. This is important for security, since most code
// does not check the result of memory allocation.
// https://msdn.microsoft.com/en-us/library/het71c37.aspx
// Pass the size of the failed request in an exception argument.
ULONG_PTR exception_args[] = {size};
::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
std::size(exception_args), exception_args);
// Safety check, make sure process exits here.
_exit(win::kOomExceptionCode);
#else
size_t tmp_size = size;
internal::base::debug::Alias(&tmp_size);
// Note: Don't add anything that may allocate here. Depending on the
// allocator, this may be called from within the allocator (e.g. with
// PartitionAlloc), and would deadlock as our locks are not recursive.
//
// Additionally, this is unlikely to work, since allocating from an OOM
// handler is likely to fail.
//
// Use PA_IMMEDIATE_CRASH() so that the top frame in the crash is our code,
// rather than using abort() or similar; this avoids the crash server needing
// to be able to successfully unwind through libc to get to the correct
// address, which is particularly an issue on Android.
PA_IMMEDIATE_CRASH();
#endif // BUILDFLAG(IS_WIN)
}
} // namespace internal
void TerminateBecauseOutOfMemory(size_t size) {
internal::OnNoMemoryInternal(size);
}
namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
RunPartitionAllocOomCallback();
TerminateBecauseOutOfMemory(size);
PA_IMMEDIATE_CRASH();
}
} // namespace internal
} // namespace partition_alloc

View File

@ -0,0 +1,70 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#include <cstddef>
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
#endif
namespace partition_alloc {
// Terminates process. Should be called only for out of memory errors.
// |size| is the size of the failed allocation, or 0 if not known.
// Crash reporting classifies such crashes as OOM.
// Must be allocation-safe.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void TerminateBecauseOutOfMemory(size_t size);
// Records the size of the allocation that caused the current OOM crash, for
// consumption by Breakpad.
// TODO: this can be removed when Breakpad is no longer supported.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;
#if BUILDFLAG(IS_WIN)
namespace win {
// Custom Windows exception code chosen to indicate an out of memory error.
// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
// "To make sure that you do not define a code that conflicts with an existing
// exception code" ... "The resulting error code should therefore have the
// highest four bits set to hexadecimal E."
// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
const DWORD kOomExceptionCode = 0xe0000008;
} // namespace win
#endif
namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
OnNoMemory(size_t size);
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
// OOM_CRASH(size) is called by users of PageAllocator (including
// PartitionAlloc) to signify an allocation failure from the platform.
#define OOM_CRASH(size) \
do { \
/* Raising an exception might allocate, allow that. */ \
::partition_alloc::ScopedAllowAllocations guard{}; \
::partition_alloc::internal::OnNoMemory(size); \
} while (0)
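// Example usage (a sketch, not from the original header): a platform layer
// that fails to obtain memory reports the failed request size through
// OOM_CRASH(). TryPlatformReserve() is a hypothetical helper.
//
//   uintptr_t region = TryPlatformReserve(length);
//   if (!region)
//     OOM_CRASH(length);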
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_

View File

@ -0,0 +1,27 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
namespace partition_alloc {
namespace {
PartitionAllocOomCallback g_oom_callback;
} // namespace
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
PA_DCHECK(!g_oom_callback);
g_oom_callback = callback;
}
namespace internal {
void RunPartitionAllocOomCallback() {
if (g_oom_callback)
g_oom_callback();
}
} // namespace internal
} // namespace partition_alloc

View File

@ -0,0 +1,26 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc {
using PartitionAllocOomCallback = void (*)();
// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
// allocation failure from the platform.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback);
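// Example usage (a sketch, not part of the original header): registering a
// callback that records diagnostic state before the process is terminated.
// RecordOomDiagnostics() is a hypothetical helper; it must not allocate, since
// it runs on the OOM path.
//
//   void RecordOomDiagnostics();
//   partition_alloc::SetPartitionAllocOomCallback(&RecordOomDiagnostics);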
namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RunPartitionAllocOomCallback();
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_

View File

@ -0,0 +1,407 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include <atomic>
#include <cstdint>
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif
#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
#elif BUILDFLAG(IS_POSIX)
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif BUILDFLAG(IS_FUCHSIA)
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif
namespace partition_alloc {
namespace {
internal::Lock g_reserve_lock;
// We may reserve/release address space on different threads.
internal::Lock& GetReserveLock() {
return g_reserve_lock;
}
std::atomic<size_t> g_total_mapped_address_space;
// We only support a single block of reserved address space.
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
uintptr_t AllocPagesIncludingReserved(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1) {
uintptr_t ret =
internal::SystemAllocPages(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
if (!ret) {
const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address
// space and try once more.
ReleaseReservation();
ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
}
}
return ret;
}
// Trims memory at |base_address| to the given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns 0 and frees memory at
// |base_address|.
uintptr_t TrimMapping(uintptr_t base_address,
size_t base_length,
size_t trim_length,
uintptr_t alignment,
uintptr_t alignment_offset,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(base_length >= trim_length);
PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
PA_DCHECK(alignment_offset < alignment);
uintptr_t new_base =
NextAlignedWithOffset(base_address, alignment, alignment_offset);
PA_DCHECK(new_base >= base_address);
size_t pre_slack = new_base - base_address;
size_t post_slack = base_length - pre_slack - trim_length;
PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
PA_DCHECK(pre_slack < base_length);
PA_DCHECK(post_slack < base_length);
return internal::TrimMappingInternal(base_address, base_length, trim_length,
accessibility, pre_slack, post_slack);
}
} // namespace
// Align |address| up to the closest, non-smaller address, that gives
// |requested_offset| remainder modulo |alignment|.
//
// Examples for alignment=1024 and requested_offset=64:
// 64 -> 64
// 65 -> 1088
// 1024 -> 1088
// 1088 -> 1088
// 1089 -> 2112
// 2048 -> 2112
uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t alignment,
uintptr_t requested_offset) {
PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
PA_DCHECK(requested_offset < alignment);
uintptr_t actual_offset = address & (alignment - 1);
uintptr_t new_address;
if (actual_offset <= requested_offset)
new_address = address + requested_offset - actual_offset;
else
new_address = address + alignment + requested_offset - actual_offset;
PA_DCHECK(new_address >= address);
PA_DCHECK(new_address - address < alignment);
PA_DCHECK(new_address % alignment == requested_offset);
return new_address;
}
namespace internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret = internal::SystemAllocPagesInternal(
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
return ret;
}
} // namespace internal
uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
file_descriptor_for_shared_alloc);
}
uintptr_t AllocPages(uintptr_t address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
page_tag);
}
void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return reinterpret_cast<void*>(
AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
accessibility, page_tag));
}
uintptr_t AllocPagesWithAlignOffset(
uintptr_t address,
size_t length,
size_t align,
size_t align_offset,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
PA_DCHECK(length >= internal::PageAllocationGranularity());
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(align >= internal::PageAllocationGranularity());
// Alignment must be power of 2 for masking math to work.
PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
PA_DCHECK(align_offset < align);
PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
// If the client passed null as the address, choose a good one.
if (!address) {
address = (GetRandomPageBase() & align_base_mask) + align_offset;
}
// First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
// On 32 bit systems, first try one random aligned address, and then try an
// aligned address derived from the value of |ret|.
constexpr int kExactSizeTries = 2;
#else
// On 64 bit systems, try 3 random aligned addresses.
constexpr int kExactSizeTries = 3;
#endif
for (int i = 0; i < kExactSizeTries; ++i) {
uintptr_t ret =
AllocPagesIncludingReserved(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
if (ret) {
// If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset)
return ret;
// Free the memory and try again.
FreePages(ret, length);
} else {
// |ret| is null; if this try was unhinted, we're OOM.
if (internal::kHintIsAdvisory || !address)
return 0;
}
#if defined(ARCH_CPU_32_BITS)
// For small address spaces, try the first aligned address >= |ret|. Note
// |ret| may be null, in which case |address| becomes null. If
// |align_offset| is non-zero, this calculation may get us not the first,
// but the next matching address.
address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
#endif
}
// Make a larger allocation so we can force alignment.
size_t try_length = length + (align - internal::PageAllocationGranularity());
PA_CHECK(try_length >= length);
uintptr_t ret;
do {
// Continue randomizing only on POSIX.
address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
ret =
AllocPagesIncludingReserved(address, try_length, accessibility,
page_tag, file_descriptor_for_shared_alloc);
// The retries are for Windows, where a race can steal our mapping on
// resize.
} while (ret && (ret = TrimMapping(ret, try_length, length, align,
align_offset, accessibility)) == 0);
return ret;
}
void FreePages(uintptr_t address, size_t length) {
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
internal::FreePagesInternal(address, length);
PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
}
void FreePages(void* address, size_t length) {
FreePages(reinterpret_cast<uintptr_t>(address), length);
}
bool TrySetSystemPagesAccess(uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
return internal::TrySetSystemPagesAccessInternal(address, length,
accessibility);
}
bool TrySetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
accessibility);
}
void SetSystemPagesAccess(uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DecommitSystemPagesInternal(address, length,
accessibility_disposition);
}
void DecommitSystemPages(
void* address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
accessibility_disposition);
}
void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DecommitAndZeroSystemPagesInternal(address, length);
}
void DecommitAndZeroSystemPages(void* address, size_t length) {
DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
void RecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible);
internal::RecommitSystemPagesInternal(address, length, accessibility,
accessibility_disposition);
}
bool TryRecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Duplicated because we want errors to be reported at a lower level in the
// crashing case.
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible);
return internal::TryRecommitSystemPagesInternal(
address, length, accessibility, accessibility_disposition);
}
void DiscardSystemPages(uintptr_t address, size_t length) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DiscardSystemPagesInternal(address, length);
}
void DiscardSystemPages(void* address, size_t length) {
DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address) {
uintptr_t mem = internal::SystemAllocPages(
0, size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kChromium);
if (mem) {
// We guarantee this alignment when reserving address space.
PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
s_reservation_address = mem;
s_reservation_size = size;
return true;
}
}
return false;
}
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address)
return false;
FreePages(s_reservation_address, s_reservation_size);
s_reservation_address = 0;
s_reservation_size = 0;
return true;
}
bool HasReservationForTesting() {
internal::ScopedGuard guard(GetReserveLock());
return s_reservation_address;
}
uint32_t GetAllocPageErrorCode() {
return internal::s_allocPageErrorCode;
}
size_t GetTotalMappedSize() {
return g_total_mapped_address_space;
}
#if BUILDFLAG(IS_WIN)
namespace {
bool g_retry_on_commit_failure = false;
}
void SetRetryOnCommitFailure(bool retry_on_commit_failure) {
g_retry_on_commit_failure = retry_on_commit_failure;
}
bool GetRetryOnCommitFailure() {
return g_retry_on_commit_failure;
}
#endif
} // namespace partition_alloc

View File

@ -0,0 +1,366 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "build/build_config.h"
namespace partition_alloc {
struct PageAccessibilityConfiguration {
enum Permissions {
kInaccessible,
kRead,
kReadWrite,
// This flag is mapped to kReadWrite on systems that
// don't support MTE.
kReadWriteTagged,
// This flag is mapped to kReadExecute on systems
// that don't support Arm's BTI.
kReadExecuteProtected,
kReadExecute,
// This flag is deprecated and will go away soon.
// TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
kReadWriteExecute,
};
#if BUILDFLAG(ENABLE_PKEYS)
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {}
#else
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS)
Permissions permissions;
#if BUILDFLAG(ENABLE_PKEYS)
// Tag the page with a Memory Protection Key. Use 0 for none.
int pkey;
#endif // BUILDFLAG(ENABLE_PKEYS)
};
// Use for De/RecommitSystemPages API.
enum class PageAccessibilityDisposition {
// Enforces permission update (Decommit will set to
// PageAccessibilityConfiguration::kInaccessible;
// Recommit will set to whatever was requested, other than
// PageAccessibilityConfiguration::kInaccessible).
kRequireUpdate,
// Will not update permissions, if the platform supports that (POSIX & Fuchsia
// only).
kAllowKeepForPerf,
};
// macOS supports tagged memory regions, to help in debugging. On Android,
// these tags are used to name anonymous mappings.
enum class PageTag {
kFirst = 240, // Minimum tag value.
kBlinkGC = 252, // Blink GC pages.
kPartitionAlloc = 253, // PartitionAlloc, no matter the partition.
kChromium = 254, // Chromium page.
kV8 = 255, // V8 heap pages.
kLast = kV8 // Maximum tag value.
};
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t NextAlignedWithOffset(uintptr_t ptr,
uintptr_t alignment,
uintptr_t requested_offset);
// Allocates one or more pages.
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned to |align_offset| modulo |align|
// bytes.
//
// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
// |align_offset| must be less than |align|. |align| must be a power of two.
//
// If |address| is 0/nullptr, then a suitable and randomized address will be
// chosen automatically.
//
// |accessibility| controls the permission of the allocated pages.
// PageAccessibilityConfiguration::kInaccessible means uncommitted.
//
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// |file_descriptor_for_shared_alloc| is only used in mapping the shadow
// pools to the same physical address as the real one in
// PartitionAddressSpace::Init(). It should be ignored in other cases.
//
// This call will return 0/nullptr if the allocation cannot be satisfied.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(uintptr_t address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPagesWithAlignOffset(
uintptr_t address,
size_t length,
size_t align,
size_t align_offset,
PageAccessibilityConfiguration page_accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
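// Example usage (a sketch, not part of the original header): reserving an
// inaccessible, 2 MiB-aligned region and releasing it again. The length and
// alignment are arbitrary example values.
//
//   constexpr size_t kLen = 2 * 1024 * 1024;
//   uintptr_t base =
//       AllocPages(kLen, /*align=*/kLen,
//                  PageAccessibilityConfiguration(
//                      PageAccessibilityConfiguration::kInaccessible),
//                  PageTag::kChromium);
//   if (base)
//     FreePages(base, kLen);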
// Frees one or more pages starting at |address| and continuing for |length|
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
// |address| must be aligned to |PageAllocationGranularity()| bytes, and
// |length| must be a multiple of |PageAllocationGranularity()|.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void FreePages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void FreePages(void* address, size_t length);
// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Performs a CHECK that the operation succeeds.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate (recommended),
// the decommitted pages will be made inaccessible before the call returns.
// While it is always a programming error to access decommitted pages without
// first recommitting them, callers may use
// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
// to skip changing permissions (use with care), for performance reasons (see
// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
// in the past). Implementations may choose to always modify permissions, hence
// accessing those pages may or may not trigger a fault.
//
// Decommitting means that physical resources (RAM or swap/pagefile) backing the
// allocated virtual address range may be released back to the system, but the
// address space is still allocated to the process (possibly using up page table
// entries or other accounting resources). There is no guarantee that the pages
// are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
//
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
// processes will not fault when touching a committed memory region. There is
// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
// best-effort allocated resources on the first touch. If
// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
// behaves in a platform-agnostic way by simulating the Windows "decommit" state
// by both discarding the region (allowing the OS to avoid swap operations)
// *and* changing the page protections so accesses fault.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitSystemPages(
void* address,
size_t length,
PageAccessibilityDisposition accessibility_disposition);
// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
// zeroed and will always mark the region as inaccessible (the equivalent of
// setting them to PageAccessibilityConfiguration::kInaccessible).
//
// This API will crash if the operation cannot be performed.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitAndZeroSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitAndZeroSystemPages(void* address, size_t length);
// Whether decommitted memory is guaranteed to be zeroed when it is
// recommitted. Do not assume that this will not change over time.
constexpr PA_COMPONENT_EXPORT(
PARTITION_ALLOC) bool DecommittedMemoryIsAlwaysZeroed() {
#if BUILDFLAG(IS_APPLE)
return false;
#else
return true;
#endif
}
// (Re)Commits one or more system pages, starting at |address| and continuing
// for |length| bytes with the given |page_accessibility| (must not be
// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
// must be aligned to a system page boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate, the call
// updates the pages to |page_accessibility|. This can be used regardless of
// what disposition was used to decommit the pages.
// PageAccessibilityDisposition::kAllowKeepForPerf allows the implementation
// to leave the page permissions, if that improves performance. This option can
// only be used if the pages were previously accessible and decommitted with
// that same option.
//
// The memory will be zeroed when it is committed for the first time. However,
// there is no such guarantee when memory is recommitted, unless
// |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void RecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition);
// Like RecommitSystemPages(), but returns false instead of crashing.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TryRecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition);
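// Example usage (a sketch, not part of the original header): temporarily
// releasing the physical backing of a committed span and committing it again
// later. |span_start| and |span_len| are hypothetical, system-page-aligned
// values.
//
//   DecommitSystemPages(span_start, span_len,
//                       PageAccessibilityDisposition::kRequireUpdate);
//   // ... later, before touching the memory again:
//   RecommitSystemPages(span_start, span_len,
//                       PageAccessibilityConfiguration(
//                           PageAccessibilityConfiguration::kReadWrite),
//                       PageAccessibilityDisposition::kRequireUpdate);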
// Discard one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
// - Do nothing.
// - Discard the page immediately, freeing up physical pages.
// - Discard the page at some time in the future in response to memory
// pressure.
//
// Only committed pages should be discarded. Discarding a page does not decommit
// it, and it is valid to discard an already-discarded page. A read or write to
// a discarded page will not fault.
//
// Reading from a discarded page may return the original page content, or a page
// full of zeroes.
//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(void* address, size_t length);
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
return (address + internal::SystemPageOffsetMask()) &
internal::SystemPageBaseMask();
}
// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
return address & internal::SystemPageBaseMask();
}
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
return (address + internal::PageAllocationGranularityOffsetMask()) &
internal::PageAllocationGranularityBaseMask();
}
// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
return address & internal::PageAllocationGranularityBaseMask();
}
// Reserves (at least) |size| bytes of address space, aligned to
// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already made.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReserveAddressSpace(size_t size);
// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
//
// Returns true when an existing reservation was released.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReleaseReservation();
// Returns true if there is currently an address space reservation.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasReservationForTesting();
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetAllocPageErrorCode();
// Returns the total amount of mapped pages from all clients of
// PageAllocator. These pages may or may not be committed. This is mostly useful
// to assess address space pressure.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
#if BUILDFLAG(IS_WIN)
// Sets whether to retry the allocation of pages when a commit failure
// happens. This doesn't cover cases where the system is out of address space,
// or reaches another limit.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetRetryOnCommitFailure(bool retry_on_commit_failure);
bool GetRetryOnCommitFailure();
#endif // BUILDFLAG(IS_WIN)
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

View File

@ -0,0 +1,169 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
// Although page allocator constants are not constexpr, they are run-time
// constant. Because the underlying variables they access, such as vm_page_size,
// are not marked const, the compiler normally has no way to know that they
// don't change and must obtain their values whenever it can't prove that they
// haven't been modified, even if they had already been obtained previously.
// Attaching __attribute__((const)) to these declarations allows these redundant
// accesses to be omitted under optimization such as common subexpression
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
// atomics (such as macOS on arm64).
// Page allocator constants are run-time constant
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#include <unistd.h>
#include <atomic>
namespace partition_alloc::internal {
// Holds the current page size and shift, where size = 1 << shift
// Use PageAllocationGranularity(), PageAllocationGranularityShift()
// to initialize and retrieve these values safely.
struct PageCharacteristics {
std::atomic<size_t> size;
std::atomic<size_t> shift;
};
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
extern PageCharacteristics page_characteristics;
} // namespace partition_alloc::internal
#else
// When defined, page size constants are fixed at compile time. When not
// defined, they may vary at run time.
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
// Use this macro to declare a function as constexpr or not based on whether
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
#endif
namespace partition_alloc::internal {
// Forward declaration, implementation below
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity();
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
return 14; // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
if (PA_UNLIKELY(shift == 0)) {
shift = static_cast<size_t>(
__builtin_ctz((unsigned int)PageAllocationGranularity()));
page_characteristics.shift.store(shift, std::memory_order_relaxed);
}
return shift;
#else
return 12; // 4kB
#endif
}
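// A standalone sketch (hypothetical, not from the original header) of the
// invariant the Linux/arm64 cache above relies on: the kernel-reported page
// size is a power of two, so its shift can be recovered with a
// count-trailing-zeros and size == 1 << shift holds.
#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
PA_ALWAYS_INLINE bool ExamplePageShiftInvariantHolds() {
  size_t size = static_cast<size_t>(getpagesize());
  size_t shift =
      static_cast<size_t>(__builtin_ctz(static_cast<unsigned int>(size)));
  return size == (size_t{1} << shift);
}
#endif  // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)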
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
// This is literally equivalent to |1 << PageAllocationGranularityShift()|
// below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
return vm_page_size;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
// initialize cache.
size_t size = page_characteristics.size.load(std::memory_order_relaxed);
if (PA_UNLIKELY(size == 0)) {
size = static_cast<size_t>(getpagesize());
page_characteristics.size.store(size, std::memory_order_relaxed);
}
return size;
#else
return 1 << PageAllocationGranularityShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
return PageAllocationGranularity() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
return ~PageAllocationGranularityOffsetMask();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageShift() {
// On Windows the allocation granularity is larger than the page size. This
// matters when reserving an address space range (done at allocation
// granularity), as opposed to committing pages into memory (done at system
// page granularity).
#if BUILDFLAG(IS_WIN)
return 12; // 4096=1<<12
#else
return PageAllocationGranularityShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageSize() {
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
// This is literally equivalent to |1 << SystemPageShift()| below, but was
// separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
// non-constexpr.
return PageAllocationGranularity();
#else
return 1 << SystemPageShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageOffsetMask() {
return SystemPageSize() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageBaseMask() {
return ~SystemPageOffsetMask();
}
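// A minimal sketch (hypothetical helper, not from the original header) of the
// relationship described above: on Windows a single 64kB reservation unit
// spans sixteen 4kB system pages, while on the other supported configurations
// the two granularities coincide and this returns 1.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ExampleSystemPagesPerAllocationGranule() {
  return PageAllocationGranularity() / SystemPageSize();
}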
constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page.
constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

View File

@ -0,0 +1,23 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator.h"
namespace partition_alloc::internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

View File

@ -0,0 +1,242 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file implements memory allocation primitives for PageAllocator using
// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
// object that corresponds to a set of memory pages. VMO pages may be mapped
// to an address space. The code below creates a VMO for each memory allocation
// and maps it into the default address space of the current process.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal {
namespace {
// Returns VMO name for a PageTag.
const char* PageTagToName(PageTag tag) {
switch (tag) {
case PageTag::kBlinkGC:
return "cr_blink_gc";
case PageTag::kPartitionAlloc:
return "cr_partition_alloc";
case PageTag::kChromium:
return "cr_chromium";
case PageTag::kV8:
return "cr_v8";
default:
PA_DCHECK(false);
return "";
}
}
zx_vm_option_t PageAccessibilityToZxVmOptions(
PageAccessibilityConfiguration accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return ZX_VM_PERM_READ;
case PageAccessibilityConfiguration::kReadWrite:
case PageAccessibilityConfiguration::kReadWriteTagged:
return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
case PageAccessibilityConfiguration::kReadExecuteProtected:
case PageAccessibilityConfiguration::kReadExecute:
return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
case PageAccessibilityConfiguration::kReadWriteExecute:
return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
default:
PA_NOTREACHED();
[[fallthrough]];
case PageAccessibilityConfiguration::kInaccessible:
return 0;
}
}
} // namespace
// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{0};
uintptr_t SystemAllocPagesInternal(
uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
[[maybe_unused]] int file_descriptor_for_shared_alloc) {
zx::vmo vmo;
zx_status_t status = zx::vmo::create(length, 0, &vmo);
if (status != ZX_OK) {
PA_ZX_DLOG(INFO, status) << "zx_vmo_create";
return 0;
}
const char* vmo_name = PageTagToName(page_tag);
status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));
// VMO names are used only for debugging, so failure to set a name is not
// fatal.
PA_ZX_DCHECK(status == ZX_OK, status);
if (page_tag == PageTag::kV8) {
// V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
// in the new VMO.
status = vmo.replace_as_executable(zx::resource(), &vmo);
if (status != ZX_OK) {
PA_ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
return 0;
}
}
zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);
uint64_t vmar_offset = 0;
if (hint) {
vmar_offset = hint;
options |= ZX_VM_SPECIFIC;
}
uint64_t address;
status =
zx::vmar::root_self()->map(options, vmar_offset, vmo,
/*vmo_offset=*/0, length, &address);
if (status != ZX_OK) {
// map() is expected to fail if |hint| is set to an already-in-use location.
if (!hint) {
PA_ZX_DLOG(ERROR, status) << "zx_vmar_map";
}
return 0;
}
return address;
}
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
// Unmap head if necessary.
if (pre_slack) {
zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
PA_ZX_CHECK(status == ZX_OK, status);
}
// Unmap tail if necessary.
if (post_slack) {
zx_status_t status = zx::vmar::root_self()->unmap(
base_address + pre_slack + trim_length, post_slack);
PA_ZX_CHECK(status == ZX_OK, status);
}
return base_address + pre_slack;
}
bool TrySetSystemPagesAccessInternal(
uint64_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
zx_status_t status = zx::vmar::root_self()->protect(
PageAccessibilityToZxVmOptions(accessibility), address, length);
return status == ZX_OK;
}
void SetSystemPagesAccessInternal(
uint64_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
zx_status_t status = zx::vmar::root_self()->protect(
PageAccessibilityToZxVmOptions(accessibility), address, length);
PA_ZX_CHECK(status == ZX_OK, status);
}
void FreePagesInternal(uint64_t address, size_t length) {
zx_status_t status = zx::vmar::root_self()->unmap(address, length);
PA_ZX_CHECK(status == ZX_OK, status);
}
void DiscardSystemPagesInternal(uint64_t address, size_t length) {
// TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
// forcibly de-committing them immediately, when Fuchsia supports it.
zx_status_t status = zx::vmar::root_self()->op_range(
ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
PA_ZX_CHECK(status == ZX_OK, status);
}
void DecommitSystemPagesInternal(
uint64_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
}
// TODO(https://crbug.com/1022062): Review whether this implementation is
// still appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
// discardable API.
DiscardSystemPagesInternal(address, length);
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
// TODO(https://crbug.com/1022062): this implementation will likely no longer
// be appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
// discardable API.
DiscardSystemPagesInternal(address, length);
}
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On Fuchsia systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length, accessibility);
}
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On Fuchsia systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
return TrySetSystemPagesAccess(address, length, accessibility);
}
return true;
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

View File

@ -0,0 +1,43 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include <sys/mman.h>
// PA_PROT_BTI requests a page that supports BTI landing pads.
#define PA_PROT_BTI 0x10
// PA_PROT_MTE requests a page that's suitable for memory tagging.
#define PA_PROT_MTE 0x20
namespace partition_alloc::internal {
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return PROT_READ;
case PageAccessibilityConfiguration::kReadWriteTagged:
#if defined(ARCH_CPU_ARM64)
return PROT_READ | PROT_WRITE |
(base::CPU::GetInstanceNoAllocation().has_mte() ? PA_PROT_MTE : 0);
#else
[[fallthrough]];
#endif
case PageAccessibilityConfiguration::kReadWrite:
return PROT_READ | PROT_WRITE;
case PageAccessibilityConfiguration::kReadExecuteProtected:
return PROT_READ | PROT_EXEC |
(base::CPU::GetInstanceNoAllocation().has_bti() ? PA_PROT_BTI : 0);
case PageAccessibilityConfiguration::kReadExecute:
return PROT_READ | PROT_EXEC;
case PageAccessibilityConfiguration::kReadWriteExecute:
return PROT_READ | PROT_WRITE | PROT_EXEC;
case PageAccessibilityConfiguration::kInaccessible:
return PROT_NONE;
}
}
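// A minimal usage sketch (hypothetical, not from the original file): the
// value returned by GetAccessFlags() is a plain PROT_* bitmask (optionally
// including PA_PROT_BTI / PA_PROT_MTE when the CPU supports them), so it can
// be handed directly to mprotect() or mmap().
namespace {
[[maybe_unused]] bool ExampleApplyConfiguredAccess(
    void* ptr,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  // |ptr| must point at a page-aligned, already-mapped region.
  return mprotect(ptr, length, GetAccessFlags(accessibility)) == 0;
}
}  // namespace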
} // namespace partition_alloc::internal

View File

@ -0,0 +1,395 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <sys/mman.h>
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
#if BUILDFLAG(IS_IOS)
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
#elif BUILDFLAG(IS_MAC)
#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
#else
#error "Unknown platform"
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h"
#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if BUILDFLAG(IS_ANDROID)
#include <sys/prctl.h>
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <sys/resource.h>
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#if BUILDFLAG(IS_MAC)
// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop
#endif // BUILDFLAG(IS_MAC)
namespace partition_alloc::internal {
namespace {
#if BUILDFLAG(IS_ANDROID)
const char* PageTagToName(PageTag tag) {
// Important: All the names should be string literals. As per prctl.h in
// //third_party/android_ndk the kernel keeps a pointer to the name instead
// of copying it.
//
// Having the name in .rodata ensures that the pointer remains valid as
// long as the mapping is alive.
switch (tag) {
case PageTag::kBlinkGC:
return "blink_gc";
case PageTag::kPartitionAlloc:
return "partition_alloc";
case PageTag::kChromium:
return "chromium";
case PageTag::kV8:
return "v8";
default:
PA_DCHECK(false);
return "";
}
}
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_MAC)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
if (!base::mac::IsAtLeastOS10_14()) {
// MAP_JIT existed before macOS 10.14, but had somewhat different semantics.
// Only one MAP_JIT region was permitted per process, but calling code here
// will very likely require more than one such region. Since MAP_JIT is not
// strictly necessary to write code to a region and then execute it on these
// older OSes, don't use it at all.
return false;
}
// Until determining that the hardened runtime is enabled, early returns will
// return true, so that MAP_JIT will be used. This is important on arm64,
// which only allows pages to be simultaneously writable and executable when
// in a region allocated with MAP_JIT, regardless of code signing options. On
// arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
// executable fails with EPERM. Although this is not enforced on x86_64,
// MAP_JIT is harmless in that case.
base::ScopedCFTypeRef<SecTaskRef> task(
SecTaskCreateFromSelf(kCFAllocatorDefault));
if (!task) {
return true;
}
uint32_t flags = SecTaskGetCodeSignStatus(task);
if (!(flags & kSecCodeSignatureRuntime)) {
// The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
// == CS_RUNTIME.
return true;
}
// The hardened runtime is enabled. From this point on, early returns must
// return false, indicating that MAP_JIT is not to be used. It's an error
// (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
// entitlement is specified.
base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement)
return false;
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
}
#endif // BUILDFLAG(IS_MAC)
} // namespace
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
std::atomic<int32_t> s_allocPageErrorCode{0};
int GetAccessFlags(PageAccessibilityConfiguration accessibility);
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
#if BUILDFLAG(IS_APPLE)
// Use a custom tag to make it easier to distinguish Partition Alloc regions
// in vmmap(1). Tags between 240-255 are supported.
PA_DCHECK(PageTag::kFirst <= page_tag);
PA_DCHECK(PageTag::kLast >= page_tag);
int fd = file_descriptor_for_shared_alloc == -1
? VM_MAKE_TAG(static_cast<int>(page_tag))
: file_descriptor_for_shared_alloc;
#else
int fd = file_descriptor_for_shared_alloc;
#endif
int access_flag = GetAccessFlags(accessibility);
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if BUILDFLAG(IS_MAC)
// On macOS 10.14 and higher, executables that are code signed with the
// "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit"
// code signing entitlement and allocating the region with the MAP_JIT flag.
static const bool kUseMapJit = UseMapJit();
if (page_tag == PageTag::kV8 && kUseMapJit) {
map_flags |= MAP_JIT;
}
#endif
void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
map_flags, fd, 0);
if (ret == MAP_FAILED) {
s_allocPageErrorCode = errno;
ret = nullptr;
}
#if BUILDFLAG(IS_ANDROID)
// On Android, anonymous mappings can have a name attached to them. This is
// useful for debugging, and double-checking memory attribution.
if (ret) {
// No error checking on purpose, testing only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
PageTagToName(page_tag));
}
#endif
return reinterpret_cast<uintptr_t>(ret);
}
bool TrySetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
#if BUILDFLAG(ENABLE_PKEYS)
return 0 == PkeyMprotectIfEnabled(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility),
accessibility.pkey);
#else
return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility)));
#endif
}
void SetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
int access_flags = GetAccessFlags(accessibility);
#if BUILDFLAG(ENABLE_PKEYS)
int ret =
PkeyMprotectIfEnabled(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility), accessibility.pkey);
#else
int ret = PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility)));
#endif
// On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
// kernel data structures cannot be allocated, (2) the address range is
// invalid, or (3) this would split an existing mapping in a way that would
// exceed the maximum number of allowed mappings.
//
// None of these is very likely, but we still get a lot of crashes here. This is
// because setrlimit(RLIMIT_DATA)'s limit is checked and enforced here, if the
// access flags match a "data" mapping, which in our case would be MAP_PRIVATE
// | MAP_ANONYMOUS, and PROT_WRITE. See the call to may_expand_vm() in
// mm/mprotect.c in the kernel for details.
//
// In this case, we are almost certainly bumping into the sandbox limit, mark
// the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
OOM_CRASH(length);
PA_PCHECK(0 == ret);
}
void FreePagesInternal(uintptr_t address, size_t length) {
PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
}
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
uintptr_t ret = base_address;
// We can resize the allocation run. Release unneeded memory before and after
// the aligned range.
if (pre_slack) {
FreePages(base_address, pre_slack);
ret = base_address + pre_slack;
}
if (post_slack) {
FreePages(ret + trim_length, post_slack);
}
return ret;
}
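// A minimal sketch (hypothetical helper, not from the original header) of how
// the slack values consumed above typically arise: over-allocate by |align|,
// find the aligned base inside the enlarged mapping, and hand back the unused
// head and tail as |pre_slack| and |post_slack|. Assumes |align| is a power
// of two and a multiple of the page allocation granularity.
inline uintptr_t ExampleAllocAlignedViaTrim(
    size_t size,
    size_t align,
    PageAccessibilityConfiguration accessibility) {
  uintptr_t base = SystemAllocPagesInternal(0, size + align, accessibility,
                                            PageTag::kChromium, -1);
  if (!base) {
    return 0;
  }
  uintptr_t aligned = (base + align - 1) & ~(align - 1);
  size_t pre_slack = aligned - base;
  size_t post_slack = align - pre_slack;
  // pre_slack + post_slack == align, so the trimmed range exactly covers the
  // original over-sized mapping.
  return TrimMappingInternal(base, size + align, size, accessibility,
                             pre_slack, post_slack);
}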
void DecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
// In POSIX, there is no decommit concept. Discarding is an effective way of
// implementing the Windows semantics where the OS is allowed to not swap the
// pages in the region.
DiscardSystemPages(address, length);
bool change_permissions =
accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
#if BUILDFLAG(PA_DCHECK_IS_ON)
// This is not guaranteed, show that we're serious.
//
// More specifically, several callers have had issues with assuming that
// memory is zeroed, this would hopefully make these bugs more visible. We
// don't memset() everything, because ranges can be very large, and doing it
// over the entire range could make Chrome unusable with
// BUILDFLAG(PA_DCHECK_IS_ON).
//
// Only do it when we are about to change the permissions, since we don't know
// the previous permissions, and cannot restore them.
if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
// Memory may not be writable.
size_t size = std::min(length, 2 * SystemPageSize());
void* ptr = reinterpret_cast<void*>(address);
PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
memset(ptr, 0xcc, size);
}
#endif
// Make pages inaccessible, unless the caller requested to keep permissions.
//
// Note, there is a small window between these calls when the pages can be
// incorrectly touched and brought back to memory. Not ideal, but doing those
// operations in the opposite order resulted in PMF regression on Mac (see
// crbug.com/1153021).
if (change_permissions) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
}
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
// a MAP_FIXED request is successful, then any previous mappings [...] for
// those whole pages containing any part of the address range [pa,pa+len)
// shall be removed, as if by an appropriate call to munmap(), before the
// new mapping is established." As a consequence, the memory will be
// zero-initialized on next access.
void* ptr = reinterpret_cast<void*>(address);
void* ret = mmap(ptr, length, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
PA_CHECK(ptr == ret);
}
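// A standalone sketch (hypothetical, not from the original header)
// demonstrating the property relied upon above: mapping anonymous memory with
// MAP_FIXED over an existing mapping atomically replaces it, and the replaced
// range reads back as zeroes once it is accessible again.
inline bool ExampleMapFixedZeroesRange(uintptr_t address, size_t length) {
  void* ptr = reinterpret_cast<void*>(address);
  // Replace the existing mapping; its previous contents are discarded.
  void* ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (ret != ptr) {
    return false;
  }
  // Freshly mapped anonymous memory is zero-filled.
  return static_cast<unsigned char*>(ret)[0] == 0;
}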
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On POSIX systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length, accessibility);
}
#if BUILDFLAG(IS_APPLE)
// On macOS, to update accounting, we need to make another syscall. For more
// details, see https://crbug.com/823915.
madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On POSIX systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
bool ok = TrySetSystemPagesAccess(address, length, accessibility);
if (!ok)
return false;
}
#if BUILDFLAG(IS_APPLE)
// On macOS, to update accounting, we need to make another syscall. For more
// details, see https://crbug.com/823915.
madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif
return true;
}
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
#if !BUILDFLAG(IS_NACL)
void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE)
int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
if (ret) {
// MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
ret = madvise(ptr, length, MADV_DONTNEED);
}
PA_PCHECK(ret == 0);
#else
// We have experimented with other flags, but with suboptimal results.
//
// MADV_FREE (Linux): Makes our memory measurements less predictable;
// performance benefits unclear.
//
// Therefore, we just do the simple thing: MADV_DONTNEED.
PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
#endif
#endif // !BUILDFLAG(IS_NACL)
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

View File

@ -0,0 +1,270 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#include <versionhelpers.h>
#include <cstdint>
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal {
namespace {
// On Windows, discarded pages are not returned to the system immediately and
// not guaranteed to be zeroed when returned to the application.
using DiscardVirtualMemoryFunction = DWORD(WINAPI*)(PVOID virtualAddress,
SIZE_T size);
DiscardVirtualMemoryFunction s_discard_virtual_memory =
reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
} // namespace
// |VirtualAlloc| will fail if allocation at the hint address is blocked.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
bool IsOutOfMemory(DWORD error) {
// From
// https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
switch (error) {
// Page file is being extended.
case ERROR_COMMITMENT_MINIMUM:
// Page file is too small.
case ERROR_COMMITMENT_LIMIT:
#if defined(PA_HAS_64_BITS_POINTERS)
// Not enough memory resources are available to process this command.
//
// It is not entirely clear whether this error pertains to out of address
// space errors, or the kernel being out of memory. Only include it for 64
// bit architectures, since address space issues are unlikely there.
case ERROR_NOT_ENOUGH_MEMORY:
#endif
case ERROR_PAGEFILE_QUOTA:
// Insufficient quota to complete the requested service.
return true;
default:
return false;
}
}
void* VirtualAllocWithRetry(void* address,
size_t size,
DWORD type_flags,
DWORD access_flags) {
void* ret = nullptr;
// Failure to commit memory can be temporary, in at least two cases:
// - The page file is getting extended.
// - Another process terminates (most likely because of OOM)
//
// Wait and retry, since the alternative is crashing. Note that if we
// selectively apply this... hum... beautiful hack to some process types only,
// "some process crashing" may very well be one of ours, which may be
// desirable (e.g. some processes like the browser are more important than
// others).
//
// This approach has been shown to be effective for Firefox, see
// crbug.com/1392738 for context. Constants below are accordingly taken from
// Firefox as well.
constexpr int kMaxTries = 10;
constexpr int kDelayMs = 50;
bool should_retry = GetRetryOnCommitFailure() && (type_flags & MEM_COMMIT) &&
(access_flags != PAGE_NOACCESS);
for (int tries = 0; tries < kMaxTries; tries++) {
ret = VirtualAlloc(address, size, type_flags, access_flags);
// Only retry for commit failures. If this is an address space problem
// (e.g. caller asked for an address which is not available), this is
// unlikely to be resolved by waiting.
if (ret || !should_retry || !IsOutOfMemory(GetLastError()))
break;
Sleep(kDelayMs);
}
return ret;
}
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return PAGE_READONLY;
case PageAccessibilityConfiguration::kReadWrite:
case PageAccessibilityConfiguration::kReadWriteTagged:
return PAGE_READWRITE;
case PageAccessibilityConfiguration::kReadExecute:
case PageAccessibilityConfiguration::kReadExecuteProtected:
return PAGE_EXECUTE_READ;
case PageAccessibilityConfiguration::kReadWriteExecute:
return PAGE_EXECUTE_READWRITE;
default:
PA_NOTREACHED();
[[fallthrough]];
case PageAccessibilityConfiguration::kInaccessible:
return PAGE_NOACCESS;
}
}
uintptr_t SystemAllocPagesInternal(
uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
[[maybe_unused]] int file_descriptor_for_shared_alloc) {
DWORD access_flag = GetAccessFlags(accessibility);
const DWORD type_flags = (accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible)
? (MEM_RESERVE | MEM_COMMIT)
: MEM_RESERVE;
void* ret = VirtualAllocWithRetry(reinterpret_cast<void*>(hint), length,
type_flags, access_flag);
if (ret == nullptr) {
s_allocPageErrorCode = GetLastError();
}
return reinterpret_cast<uintptr_t>(ret);
}
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
uintptr_t ret = base_address;
if (pre_slack || post_slack) {
// We cannot resize the allocation run. Free it and retry at the aligned
// address within the freed range.
ret = base_address + pre_slack;
FreePages(base_address, base_length);
ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
}
return ret;
}
bool TrySetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible)
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
// Call the retry path even though this function can fail, because callers of
// this are likely to crash the process when this function fails, and we don't
// want that for transient failures.
return nullptr != VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
GetAccessFlags(accessibility));
}
void SetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible) {
if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
}
} else {
if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
GetAccessFlags(accessibility))) {
int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT)
OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
PA_CHECK(ERROR_SUCCESS == error);
}
}
}
void FreePagesInternal(uintptr_t address, size_t length) {
PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
}
void DecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
// "If a page is decommitted but not released, its state changes to reserved.
// Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
// release it. Attempts to read from or write to a reserved page results in an
// access violation exception."
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
// for MEM_COMMIT: "The function also guarantees that when the caller later
// initially accesses the memory, the contents will be zero."
PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
}
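// A standalone sketch (hypothetical, not from the original header) of the
// VirtualFree/VirtualAlloc contract quoted above: after MEM_DECOMMIT the
// pages are merely reserved, and committing them again yields zero-filled
// memory.
inline bool ExampleDecommitThenRecommitIsZeroed(void* ptr, size_t length) {
  if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
    return false;
  }
  // The range is now reserved but not committed; touching it would fault.
  if (!VirtualAlloc(ptr, length, MEM_COMMIT, PAGE_READWRITE)) {
    return false;
  }
  // MEM_COMMIT guarantees that newly committed pages read as zero.
  return static_cast<unsigned char*>(ptr)[0] == 0;
}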
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
SetSystemPagesAccess(address, length, accessibility);
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
return TrySetSystemPagesAccess(address, length, accessibility);
}
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
if (s_discard_virtual_memory ==
reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) {
// DiscardVirtualMemory's minimum supported client is Windows 8.1 Update.
// So skip GetProcAddress("DiscardVirtualMemory") if the Windows version is
// older than Windows 8.1.
if (IsWindows8Point1OrGreater()) {
s_discard_virtual_memory =
reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
} else {
s_discard_virtual_memory = nullptr;
}
}
void* ptr = reinterpret_cast<void*>(address);
// Use DiscardVirtualMemory when available because it releases faster than
// MEM_RESET.
DWORD ret = 1;
if (s_discard_virtual_memory) {
ret = s_discard_virtual_memory(ptr, length);
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
if (ret) {
PA_CHECK(VirtualAllocWithRetry(ptr, length, MEM_RESET, PAGE_READWRITE));
}
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_

View File

@ -0,0 +1,451 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_address_space.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/compressed_pointer.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_IOS)
#include <mach-o/dyld.h>
#endif
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif // BUILDFLAG(IS_WIN)
#if defined(PA_ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_PKEYS)
#include <sys/mman.h>
#endif
namespace partition_alloc::internal {
#if defined(PA_HAS_64_BITS_POINTERS)
namespace {
#if BUILDFLAG(IS_WIN)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
bool IsLegacyWindowsVersion() {
// Use ::RtlGetVersion instead of ::GetVersionEx or helpers from
// VersionHelpers.h because those alternatives change their behavior depending
// on whether or not the calling executable has a compatibility manifest
// resource. It's better for the allocator to not depend on that to decide the
// pool size.
// Assume legacy if ::RtlGetVersion is not available or it fails.
using RtlGetVersion = LONG(WINAPI*)(OSVERSIONINFOEX*);
const RtlGetVersion rtl_get_version = reinterpret_cast<RtlGetVersion>(
::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "RtlGetVersion"));
if (!rtl_get_version)
return true;
OSVERSIONINFOEX version_info = {};
version_info.dwOSVersionInfoSize = sizeof(version_info);
if (rtl_get_version(&version_info) != ERROR_SUCCESS)
return true;
// Anything prior to Windows 8.1 is considered legacy for the allocator.
// Windows 8.1 is major 6 with minor 3.
return version_info.dwMajorVersion < 6 ||
(version_info.dwMajorVersion == 6 && version_info.dwMinorVersion < 3);
}
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}
PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}
#endif // BUILDFLAG(IS_WIN)
PA_NOINLINE void HandlePoolAllocFailure() {
PA_NO_CODE_FOLDING();
uint32_t alloc_page_error_code = GetAllocPageErrorCode();
PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
// It's important to easily differentiate these two failures on Windows, so
// crash with different stacks.
#if BUILDFLAG(IS_WIN)
if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
// The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
// it must be VA space exhaustion.
HandlePoolAllocFailureOutOfVASpace();
} else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
// On Windows <8.1, MEM_RESERVE increases commit charge to account for
// not-yet-committed PTEs needed to cover that VA space, if it was to be
// committed (see crbug.com/1101421#c16).
HandlePoolAllocFailureOutOfCommitCharge();
} else
#endif // BUILDFLAG(IS_WIN)
{
PA_CHECK(false);
}
}
} // namespace
#if BUILDFLAG(ENABLE_PKEYS)
alignas(PA_PKEY_ALIGN_SZ)
#else
alignas(kPartitionCachelineSize)
#endif
PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
#if defined(PA_ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(IS_IOS)
namespace {
bool IsIOSTestProcess() {
// On iOS, only applications with the extended virtual addressing entitlement
// can use a large address space. Since Earl Grey test runner apps cannot get
// entitlements, they must use a much smaller pool size. Similarly,
// integration tests for ChromeWebView end up with two PartitionRoots since
// both the integration tests and ChromeWebView have a copy of base/. Even
// with the entitlement, there is insufficient address space for two
// PartitionRoots, so a smaller pool size is needed.
// Use a fixed buffer size to avoid allocation inside the allocator.
constexpr size_t path_buffer_size = 8192;
char executable_path[path_buffer_size];
uint32_t executable_length = path_buffer_size;
int rv = _NSGetExecutablePath(executable_path, &executable_length);
PA_CHECK(!rv);
size_t executable_path_length =
std::char_traits<char>::length(executable_path);
auto has_suffix = [&](const char* suffix) -> bool {
size_t suffix_length = std::char_traits<char>::length(suffix);
if (executable_path_length < suffix_length)
return false;
return std::char_traits<char>::compare(
executable_path + (executable_path_length - suffix_length),
suffix, suffix_length) == 0;
};
return has_suffix("Runner") || has_suffix("ios_web_view_inttests");
}
} // namespace
PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
: kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
}
#else
PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
return IsLegacyWindowsVersion() ? kRegularPoolSizeForLegacyWindows
: kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return IsLegacyWindowsVersion() ? kBRPPoolSizeForLegacyWindows : kBRPPoolSize;
}
#endif // BUILDFLAG(IS_IOS)
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
void PartitionAddressSpace::Init() {
if (IsInitialized())
return;
size_t regular_pool_size = RegularPoolSize();
size_t brp_pool_size = BRPPoolSize();
#if defined(PA_GLUE_CORE_POOLS)
// Gluing core pools (regular & BRP) makes sense only when both pools are of
// the same size. This is the only way we can check belonging to either of the
// two with a single bitmask operation.
PA_CHECK(regular_pool_size == brp_pool_size);
// TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
int pools_fd = -1;
size_t glued_pool_sizes = regular_pool_size * 2;
// Note: the BRP pool must be preceded by a "forbidden zone", which is
// conveniently taken care of by the last guard page of the regular pool.
setup_.regular_pool_base_address_ =
AllocPages(glued_pool_sizes, glued_pool_sizes,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, pools_fd);
if (!setup_.regular_pool_base_address_)
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ =
setup_.regular_pool_base_address_ + regular_pool_size;
#else // defined(PA_GLUE_CORE_POOLS)
#if defined(PA_ENABLE_SHADOW_METADATA)
int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
#else
int regular_pool_fd = -1;
#endif
setup_.regular_pool_base_address_ =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, regular_pool_fd);
if (!setup_.regular_pool_base_address_)
HandlePoolAllocFailure();
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
#else
int brp_pool_fd = -1;
#endif
// Reserve an extra allocation granularity unit before the BRP pool, but keep
// the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
// is a valid pointer, and having a "forbidden zone" before the BRP pool
// prevents such a pointer from "sneaking into" the pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
uintptr_t base_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, brp_pool_fd);
if (!base_address)
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#endif // defined(PA_GLUE_CORE_POOLS)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#if defined(PA_GLUE_CORE_POOLS)
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool of a twice bigger
// size. Adjust the mask appropriately.
setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
AddressPoolManager::GetInstance().Add(
kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
AddressPoolManager::GetInstance().Add(
kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
// Sanity check pool alignment.
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
#if defined(PA_GLUE_CORE_POOLS)
PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
#endif
// Sanity check pool membership.
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
regular_pool_size - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
#if defined(PA_GLUE_CORE_POOLS)
PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif // defined(PA_GLUE_CORE_POOLS)
#if PA_STARSCAN_USE_CARD_TABLE
// Reserve memory for PCScan quarantine card table.
uintptr_t requested_address = setup_.regular_pool_base_address_;
uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
kRegularPoolHandle, requested_address, kSuperPageSize);
PA_CHECK(requested_address == actual_address)
<< "QuarantineCardTable is required to be allocated at the beginning of "
"the regular pool";
#endif // PA_STARSCAN_USE_CARD_TABLE
#if defined(PA_ENABLE_SHADOW_METADATA)
// Reserve memory for the shadow pools.
uintptr_t regular_pool_shadow_address =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, regular_pool_fd);
regular_pool_shadow_offset_ =
regular_pool_shadow_address - setup_.regular_pool_base_address_;
uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, brp_pool_fd);
brp_pool_shadow_offset_ =
brp_pool_shadow_address - setup_.brp_pool_base_address_;
#endif
#if defined(PA_POINTER_COMPRESSION)
CompressedPointerBaseGlobal::SetBase(setup_.regular_pool_base_address_);
#endif // defined(PA_POINTER_COMPRESSION)
}
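namespace {
// A minimal sketch (hypothetical, not from the original file) of the
// forbidden-zone arithmetic used above when the BRP pool is allocated
// separately: AllocPagesWithAlignOffset() is asked for a base whose offset
// within |pool_size| equals pool_size - kForbiddenZoneSize, so skipping the
// forbidden zone lands exactly on a pool_size-aligned address.
[[maybe_unused]] bool ExampleForbiddenZoneKeepsPoolAligned(
    uintptr_t base_address,
    size_t pool_size,
    size_t forbidden_zone_size) {
  // Precondition requested from AllocPagesWithAlignOffset() above:
  //   base_address % pool_size == pool_size - forbidden_zone_size.
  uintptr_t pool_base = base_address + forbidden_zone_size;
  return (pool_base & (pool_size - 1)) == 0;
}
}  // namespace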
void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
size_t size) {
// The ConfigurablePool must only be initialized once.
PA_CHECK(!IsConfigurablePoolInitialized());
#if BUILDFLAG(ENABLE_PKEYS)
// It's possible that the pkey pool has been initialized first, in which case
// the setup_ memory has been made read-only. Remove the protection
// temporarily.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(kDefaultPkey);
#endif
PA_CHECK(pool_base);
PA_CHECK(size <= kConfigurablePoolMaxSize);
PA_CHECK(size >= kConfigurablePoolMinSize);
PA_CHECK(base::bits::IsPowerOfTwo(size));
PA_CHECK(pool_base % size == 0);
setup_.configurable_pool_base_address_ = pool_base;
setup_.configurable_pool_base_mask_ = ~(size - 1);
AddressPoolManager::GetInstance().Add(
kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
#if BUILDFLAG(ENABLE_PKEYS)
// Put the pkey protection back in place.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(setup_.pkey_);
#endif
}
#if BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::InitPkeyPool(int pkey) {
// The PkeyPool can't be initialized with conflicting pkeys.
if (IsPkeyPoolInitialized()) {
PA_CHECK(setup_.pkey_ == pkey);
return;
}
size_t pool_size = PkeyPoolSize();
setup_.pkey_pool_base_address_ =
AllocPages(pool_size, pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
if (!setup_.pkey_pool_base_address_)
HandlePoolAllocFailure();
PA_DCHECK(!(setup_.pkey_pool_base_address_ & (pool_size - 1)));
setup_.pkey_ = pkey;
AddressPoolManager::GetInstance().Add(
kPkeyPoolHandle, setup_.pkey_pool_base_address_, pool_size);
PA_DCHECK(!IsInPkeyPool(setup_.pkey_pool_base_address_ - 1));
PA_DCHECK(IsInPkeyPool(setup_.pkey_pool_base_address_));
PA_DCHECK(IsInPkeyPool(setup_.pkey_pool_base_address_ + pool_size - 1));
PA_DCHECK(!IsInPkeyPool(setup_.pkey_pool_base_address_ + pool_size));
// TODO(1362969): support PA_ENABLE_SHADOW_METADATA
}
#endif // BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::UninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
UninitPkeyPoolForTesting(); // IN-TEST
#endif
#if defined(PA_GLUE_CORE_POOLS)
// The core pools (regular & BRP) were allocated using a single allocation of
// double size.
FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
#else // defined(PA_GLUE_CORE_POOLS)
FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
// For BRP pool, the allocation region includes a "forbidden zone" before the
// pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
BRPPoolSize() + kForbiddenZoneSize);
#endif // defined(PA_GLUE_CORE_POOLS)
// Do not free pages for the configurable pool, because its memory is owned
// by someone else, but deinitialize it nonetheless.
setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
AddressPoolManager::GetInstance().ResetForTesting();
#if defined(PA_POINTER_COMPRESSION)
CompressedPointerBaseGlobal::ResetBaseForTesting();
#endif // defined(PA_POINTER_COMPRESSION)
}
void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
// It's possible that the pkey pool has been initialized first, in which case
// the setup_ memory has been made read-only. Remove the protection
// temporarily.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(kDefaultPkey);
#endif
AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
#if BUILDFLAG(ENABLE_PKEYS)
// Put the pkey protection back in place.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(setup_.pkey_);
#endif
}
#if BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::UninitPkeyPoolForTesting() {
if (IsPkeyPoolInitialized()) {
TagGlobalsWithPkey(kDefaultPkey);
PkeySettings::settings.enabled = false;
FreePages(setup_.pkey_pool_base_address_, PkeyPoolSize());
AddressPoolManager::GetInstance().Remove(kPkeyPoolHandle);
setup_.pkey_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.pkey_ = kInvalidPkey;
}
}
#endif
#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
PageCharacteristics page_characteristics;
#endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
#endif // defined(PA_HAS_64_BITS_POINTERS)
} // namespace partition_alloc::internal

View File

@ -0,0 +1,490 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#include <algorithm>
#include <array>
#include <cstddef>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc {
namespace internal {
// Manages PartitionAlloc address space, which is split into pools.
// See `glossary.md`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
public:
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_;
}
#else
static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
return kRegularPoolBaseMask;
}
#endif
static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!IsInBRPPool(address));
#endif
pool_handle pool = 0;
uintptr_t base = 0;
if (IsInRegularPool(address)) {
pool = kRegularPoolHandle;
base = setup_.regular_pool_base_address_;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} else if (IsInBRPPool(address)) {
pool = kBRPPoolHandle;
base = setup_.brp_pool_base_address_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} else if (IsInConfigurablePool(address)) {
PA_DCHECK(IsConfigurablePoolInitialized());
pool = kConfigurablePoolHandle;
base = setup_.configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
} else if (IsInPkeyPool(address)) {
pool = kPkeyPoolHandle;
base = setup_.pkey_pool_base_address_;
#endif
} else {
PA_NOTREACHED();
}
return std::make_pair(pool, address - base);
}
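// Usage sketch (illustrative; the surrounding code is an assumption): callers
// typically destructure the returned pair to learn which pool an allocation
// lives in and its offset from that pool's base:
//
//   uintptr_t addr = ...;  // an address known to be managed by PA
//   auto [pool, offset] = PartitionAddressSpace::GetPoolAndOffset(addr);
//   // If addr is in the regular pool, pool == kRegularPoolHandle and
//   // offset == addr - RegularPoolBase().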
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
return kConfigurablePoolMaxSize;
}
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
return kConfigurablePoolMinSize;
}
// Initialize pools (except for the configurable one).
//
// This function must only be called from the main thread.
static void Init();
// Initialize the ConfigurablePool at the given address |pool_base|. It must
// be aligned to the size of the pool. The size must be a power of two and
// must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
//
// This function must only be called from the main thread.
static void InitConfigurablePool(uintptr_t pool_base, size_t size);
#if BUILDFLAG(ENABLE_PKEYS)
static void InitPkeyPool(int pkey);
static void UninitPkeyPoolForTesting();
#endif
static void UninitForTesting();
static void UninitConfigurablePoolForTesting();
static PA_ALWAYS_INLINE bool IsInitialized() {
// The regular and BRP pools are initialized together: either both or neither.
// The configurable and pkey pools are initialized separately.
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
return true;
}
PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
return false;
}
static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
return setup_.configurable_pool_base_address_ !=
kUninitializedPoolBaseAddress;
}
#if BUILDFLAG(ENABLE_PKEYS)
static PA_ALWAYS_INLINE bool IsPkeyPoolInitialized() {
return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
}
#endif
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
return (address & regular_pool_base_mask) ==
setup_.regular_pool_base_address_;
}
static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
return setup_.regular_pool_base_address_;
}
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
}
#if defined(PA_GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInCorePools(uintptr_t address) {
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool of twice the size.
// Adjust the mask appropriately.
constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
#endif
bool ret =
(address & core_pools_base_mask) == setup_.regular_pool_base_address_;
PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
return ret;
}
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE size_t CorePoolsSize() {
return RegularPoolSize() * 2;
}
#else // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE constexpr size_t CorePoolsSize() {
return RegularPoolSize() * 2;
}
#endif // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#endif // defined(PA_GLUE_CORE_POOLS)
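// Worked sketch of the glued-pools mask (assumes, as the mask trick requires,
// that the regular pool base is aligned to the combined, doubled size): with
// pools of 2^N bytes each and the BRP pool directly following the regular
// pool, the pair spans 2^(N+1) bytes. Clearing one extra low bit,
//
//   core_pools_base_mask = kRegularPoolBaseMask << 1,  // == ~(2^(N+1) - 1)
//
// makes (address & core_pools_base_mask) equal the regular pool base for any
// address in either pool, which is exactly what IsInCorePools() cross-checks
// against the per-pool predicates in the PA_DCHECK above.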
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
}
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
return (address & setup_.configurable_pool_base_mask_) ==
setup_.configurable_pool_base_address_;
}
static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
return setup_.configurable_pool_base_address_;
}
#if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInPkeyPool(uintptr_t address) {
return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
}
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
if (pool == kRegularPoolHandle) {
return regular_pool_shadow_offset_;
} else if (pool == kBRPPoolHandle) {
return brp_pool_shadow_offset_;
} else {
// TODO(crbug.com/1362969): Add shadow for configurable pool as well.
// Shadow is not created for ConfigurablePool for now, so this part should
// be unreachable.
PA_NOTREACHED();
return 0;
}
}
#endif
// PartitionAddressSpace is a static-only class.
PartitionAddressSpace() = delete;
PartitionAddressSpace(const PartitionAddressSpace&) = delete;
void* operator new(size_t) = delete;
void* operator new(size_t, void*) = delete;
private:
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE size_t RegularPoolSize();
static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else
// The pool sizes should be at their maximum whenever possible.
constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
return kRegularPoolSize;
}
constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
return kBRPPoolSize;
}
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS)
constexpr static PA_ALWAYS_INLINE size_t PkeyPoolSize() {
return kPkeyPoolSize;
}
#endif
// On 64-bit systems, PA allocates from several contiguous, mutually disjoint
// pools. The BRP pool is where all allocations have a BRP ref-count, thus
// pointers pointing there can use a BRP protection against UaF. Allocations
// in the other pools don't have that.
//
// Pool sizes have to be a power of two. Each pool will be aligned at its
// own size boundary.
//
// NOTE! The BRP pool must be preceded by an inaccessible region. This is to
// prevent a pointer to the end of a non-BRP-pool allocation from falling into
// the BRP pool, thus triggering the BRP mechanism and likely crashing. This
// "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
// allocation granularity unit.
//
// The ConfigurablePool is an optional Pool that can be created inside an
// existing mapping provided by the embedder. This Pool can be used when
// certain PA allocations must be located inside a given virtual address
// region. One use case for this Pool is V8 Sandbox, which requires that
// ArrayBuffers be located inside of it.
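// Rough layout sketch (illustrative only; actual bases are picked at Init()
// time and sizes depend on configuration):
//
//   ... [ regular pool ] ... [ forbidden zone ][ BRP pool ] ...
//   ... [ configurable pool, inside an embedder-provided mapping ] ...
//
// The only requirements restated here are the ones from the comment above:
// each pool is aligned to its own power-of-two size, and an inaccessible gap
// precedes the BRP pool so that a past-the-end pointer of a non-BRP
// allocation can never land inside it.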
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSize));
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr size_t kPkeyPoolSize = kGiB / 4;
static_assert(base::bits::IsPowerOfTwo(kPkeyPoolSize));
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
// crbug.com/1101421 and crbug.com/1217759).
static constexpr size_t kRegularPoolSizeForLegacyWindows = 4 * kGiB;
static constexpr size_t kBRPPoolSizeForLegacyWindows = 4 * kGiB;
static_assert(kRegularPoolSizeForLegacyWindows < kRegularPoolSize);
static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize));
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
#if BUILDFLAG(IS_IOS)
#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#error iOS is only supported with a dynamically sized GigaCage.
#endif
// We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
// since the test process cannot use an extended virtual address space (see
// crbug.com/1250788).
static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif // BUILDFLAG(IS_IOS)
#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// Masks used to easily determine whether an address belongs to a pool.
static constexpr uintptr_t kRegularPoolOffsetMask =
static_cast<uintptr_t>(kRegularPoolSize) - 1;
static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
static constexpr uintptr_t kBRPPoolOffsetMask =
static_cast<uintptr_t>(kBRPPoolSize) - 1;
static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr uintptr_t kPkeyPoolOffsetMask =
static_cast<uintptr_t>(kPkeyPoolSize) - 1;
static constexpr uintptr_t kPkeyPoolBaseMask = ~kPkeyPoolOffsetMask;
#endif
// This must be set to such a value that IsIn*Pool() always returns false when
// the pool isn't initialized.
static constexpr uintptr_t kUninitializedPoolBaseAddress =
static_cast<uintptr_t>(-1);
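// Why an all-ones sentinel works (informal reasoning, not from the original
// file): every IsIn*Pool() check has the form
//
//   (address & pool_base_mask) == pool_base_address_
//
// Before initialization, pool_base_address_ is uintptr_t{-1}. The left-hand
// side can never equal that value: dynamically selected masks (and the
// configurable pool mask) are still 0, and the fixed masks always clear the
// low pool-offset bits, so at least one bit of the result is 0. Hence all
// membership checks return false until the pool is set up.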
struct PoolSetup {
// Before PartitionAddressSpace::Init(), no allocations are made from a
// reserved address space. Therefore, set *_pool_base_address_ initially to
// -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
constexpr PoolSetup()
: regular_pool_base_address_(kUninitializedPoolBaseAddress),
brp_pool_base_address_(kUninitializedPoolBaseAddress),
configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if BUILDFLAG(ENABLE_PKEYS)
pkey_pool_base_address_(kUninitializedPoolBaseAddress),
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
regular_pool_base_mask_(0),
brp_pool_base_mask_(0),
#if defined(PA_GLUE_CORE_POOLS)
core_pools_base_mask_(0),
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
configurable_pool_base_mask_(0)
#if BUILDFLAG(ENABLE_PKEYS)
,
pkey_(kInvalidPkey)
#endif
{
}
// Using a union to enforce padding.
union {
struct {
uintptr_t regular_pool_base_address_;
uintptr_t brp_pool_base_address_;
uintptr_t configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
uintptr_t pkey_pool_base_address_;
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t regular_pool_base_mask_;
uintptr_t brp_pool_base_mask_;
#if defined(PA_GLUE_CORE_POOLS)
uintptr_t core_pools_base_mask_;
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t configurable_pool_base_mask_;
#if BUILDFLAG(ENABLE_PKEYS)
int pkey_;
#endif
};
#if BUILDFLAG(ENABLE_PKEYS)
// With pkey support, we want to be able to pkey-tag all global metadata,
// which requires page granularity.
char one_page_[SystemPageSize()];
#else
char one_cacheline_[kPartitionCachelineSize];
#endif
};
};
#if BUILDFLAG(ENABLE_PKEYS)
static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
"PoolSetup size must be a multiple of the system page size");
#else
static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
"PoolSetup size must be a multiple of the cacheline size");
#endif
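// Sketch of the padding idiom checked above (illustrative, with placeholder
// names): putting the field struct in a union with a char array of the target
// size forces sizeof(PoolSetup) up to page / cacheline granularity, and the
// static_assert pins it there. A minimal standalone analogue:
//
//   union Padded {
//     struct { int a, b; } fields;  // the real data
//     char pad[64];                 // pad the whole union to 64 bytes
//   };
//   static_assert(sizeof(Padded) % 64 == 0, "Padded must fill a cacheline");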
// See the comment describing the address layout above.
//
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writeable data, through
// alignment and padding.
#if BUILDFLAG(ENABLE_PKEYS)
static_assert(PA_PKEY_ALIGN_SZ >= kPartitionCachelineSize);
alignas(PA_PKEY_ALIGN_SZ)
#else
alignas(kPartitionCachelineSize)
#endif
static PoolSetup setup_ PA_CONSTINIT;
#if defined(PA_ENABLE_SHADOW_METADATA)
static std::ptrdiff_t regular_pool_shadow_offset_;
static std::ptrdiff_t brp_pool_shadow_offset_;
#endif
#if BUILDFLAG(ENABLE_PKEYS)
// If we use a pkey pool, we need to tag its metadata with the pkey. Allow the
// function to get access to the PoolSetup.
friend void TagGlobalsWithPkey(int pkey);
#endif
};
PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
return PartitionAddressSpace::GetPoolAndOffset(address);
}
PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
return std::get<0>(GetPoolAndOffset(address));
}
PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
return PartitionAddressSpace::OffsetInBRPPool(address);
}
#if defined(PA_ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
return PartitionAddressSpace::ShadowPoolOffset(pool);
}
#endif
} // namespace internal
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
return internal::PartitionAddressSpace::IsInRegularPool(address)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
#if BUILDFLAG(ENABLE_PKEYS)
|| internal::PartitionAddressSpace::IsInPkeyPool(address)
#endif
|| internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
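// Usage sketch (illustrative; the pointer and surrounding code are
// assumptions): this predicate lets generic code check whether an arbitrary
// address was handed out by PartitionAlloc before consulting PA metadata:
//
//   uintptr_t addr = ...;  // e.g. obtained from a pointer via UntagPtr()
//   if (partition_alloc::IsManagedByPartitionAlloc(addr)) {
//     // Safe to treat addr as PA-managed memory.
//   }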
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInRegularPool(address);
}
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInBRPPool(address);
}
#if defined(PA_GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
return internal::PartitionAddressSpace::IsInCorePools(address);
}
#endif // defined(PA_GLUE_CORE_POOLS)
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
uintptr_t address) {
return internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
#if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocPkeyPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInPkeyPool(address);
}
#endif
PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
}
} // namespace partition_alloc
#endif // defined(PA_HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

src/base/allocator/partition_allocator/partition_alloc-inl.h Normal file
View File

@ -0,0 +1,103 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#include <algorithm>
#include <cstring>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
// Prefetch *x into memory.
#if defined(__clang__) || defined(COMPILER_GCC)
#define PA_PREFETCH(x) __builtin_prefetch(x)
#else
#define PA_PREFETCH(x)
#endif
namespace partition_alloc::internal {
// This is a `memset` that resists being optimized away. Adapted from
// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
// depend on //third_party, and this is small enough.)
#if defined(COMPILER_MSVC) && !defined(__clang__)
// MSVC only supports inline assembly on x86, so the #pragma below stands in
// for the inline-assembly barrier used in SecureMemset() on other compilers.
//
// TODO(crbug.com/1351310): Make sure inlining doesn't degrade this into
// a no-op or similar. The documentation doesn't say.
#pragma optimize("", off)
#endif
PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
memset(ptr, value, size);
#if !defined(COMPILER_MSVC) || defined(__clang__)
// As best as we can tell, this is sufficient to break any optimisations that
// might try to eliminate "superfluous" memsets. If there's an easy way to
// detect memset_s, it would be better to use that.
__asm__ __volatile__("" : : "r"(ptr) : "memory");
#endif // !defined(COMPILER_MSVC) || defined(__clang__)
}
#if defined(COMPILER_MSVC) && !defined(__clang__)
#pragma optimize("", on)
#endif
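// Informal note on the barrier above (not from the original file): the empty
// asm statement takes `ptr` as an input and declares a "memory" clobber, so
// the compiler must assume the asm could read the bytes just written and
// cannot drop the memset as a dead store. A hedged usage example:
//
//   char secret[32];           // hypothetical sensitive buffer
//   /* ...use secret... */
//   SecureMemset(secret, 0, sizeof(secret));  // zeroization survives -O2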
#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
// Used to memset() memory for debugging purposes only.
PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// Only set the first 512 kiB of the allocation. This is enough to detect uses
// of uninitialized / freed memory, and makes tests run significantly
// faster. Note that for direct-mapped allocations, memory is decommitted at
// free() time, so use of freed memory cannot happen there.
#if BUILDFLAG(ENABLE_PKEYS)
LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}
#endif // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics.
#if !BUILDFLAG(PA_DCHECK_IS_ON)
PA_ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (PA_UNLIKELY(counter == 0)) {
// It's OK to truncate this value.
counter = static_cast<uint8_t>(RandomValue());
}
// If `counter` is 0, this will wrap. That is intentional and OK.
counter--;
return counter == 0;
}
#endif // !BUILDFLAG(PA_DCHECK_IS_ON)
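// Usage sketch for RandomPeriod() (the caller shown is an assumption, not the
// real free path): the idea is to perform occasional, randomly spaced extra
// work on a hot path while touching the atomic RNG only once per
// random-length period (at most every 256 calls) per thread:
//
//   if (PA_UNLIKELY(RandomPeriod())) {
//     // Occasionally do the extra work, e.g. scrub the freed allocation.
//   }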
PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
return UntagPtr(ptr);
}
PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(const void* object) {
// TODO(bartekn): Check that |object| is indeed an object start.
return ObjectInnerPtr2Addr(object);
}
PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return TagAddr(slot_start);
}
PA_ALWAYS_INLINE uintptr_t SlotStartPtr2Addr(const void* slot_start) {
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return UntagPtr(slot_start);
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

src/base/allocator/partition_allocator/partition_alloc.cc Normal file
View File

@ -0,0 +1,154 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc.h"
#include <string.h>
#include <cstdint>
#include <memory>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#if BUILDFLAG(STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif // BUILDFLAG(STARSCAN)
namespace partition_alloc {
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
// This is from page_allocator_constants.h and doesn't really fit here, but
// there isn't a centralized initialization function in page_allocator.cc, so
// there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
STATIC_ASSERT_OR_PA_CHECK(
(internal::SystemPageSize() & internal::SystemPageOffsetMask()) == 0,
"SystemPageSize() must be power of 2");
// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
STATIC_ASSERT_OR_PA_CHECK(
internal::PartitionPageSize() * 4 <= internal::kSuperPageSize,
"ok super page size");
STATIC_ASSERT_OR_PA_CHECK(
(internal::kSuperPageSize & internal::SystemPageOffsetMask()) == 0,
"ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
STATIC_ASSERT_OR_PA_CHECK(
internal::SystemPageSize() * 4 <= internal::PartitionPageSize(),
"ok partition page size");
STATIC_ASSERT_OR_PA_CHECK(
(internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
"ok partition page multiple");
static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
internal::kPageMetadataSize,
"PartitionPage should not be too big");
STATIC_ASSERT_OR_PA_CHECK(
internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=
internal::SystemPageSize(),
"page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
STATIC_ASSERT_OR_PA_CHECK(
internal::MaxDirectMapped() <=
(1UL << 31) + internal::DirectMapAllocationGranularity(),
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(internal::kSmallestBucket == internal::kAlignment,
"generic smallest bucket");
static_assert(internal::kMaxBucketed == 983040, "generic max bucketed");
STATIC_ASSERT_OR_PA_CHECK(
internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
"System pages per slot span must be no greater than 16.");
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
STATIC_ASSERT_OR_PA_CHECK(
internal::GetPartitionRefCountIndexMultiplierShift() <
std::numeric_limits<size_t>::max() / 2,
"Calculation in GetPartitionRefCountIndexMultiplierShift() must not "
"underflow.");
// Check that the GetPartitionRefCountIndexMultiplierShift() calculation is
// correct.
STATIC_ASSERT_OR_PA_CHECK(
(1 << internal::GetPartitionRefCountIndexMultiplierShift()) ==
(internal::SystemPageSize() /
(sizeof(internal::PartitionRefCount) *
(internal::kSuperPageSize / internal::SystemPageSize()))),
"Bitshift must match the intended multiplication.");
STATIC_ASSERT_OR_PA_CHECK(
((sizeof(internal::PartitionRefCount) *
(internal::kSuperPageSize / internal::SystemPageSize()))
<< internal::GetPartitionRefCountIndexMultiplierShift()) <=
internal::SystemPageSize(),
"PartitionRefCount Bitmap size must be smaller than or equal to "
"<= SystemPageSize().");
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
PA_DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory;
}
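// Worked example of the layout checks above (a sketch under assumed typical
// values; actual constants vary by platform): with SystemPageSize() == 4 KiB,
// PartitionPageSize() == 16 KiB and kSuperPageSize == 2 MiB:
//   - PartitionPageSize() * 4 == 64 KiB <= 2 MiB, so guard/metadata pages fit;
//   - SystemPageSize() * 4 == 16 KiB <= PartitionPageSize(), so the metadata
//     hole fits inside a guard partition page;
//   - NumPartitionPagesPerSuperPage() == 2 MiB / 16 KiB == 128, so the
//     per-page metadata must satisfy kPageMetadataSize <= 4096 / 128 == 32
//     bytes under these assumptions.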
void PartitionAllocGlobalUninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
internal::PartitionAddressSpace::UninitPkeyPoolForTesting();
#endif
#if BUILDFLAG(STARSCAN)
internal::PCScan::UninitForTesting(); // IN-TEST
#endif // BUILDFLAG(STARSCAN)
#if !BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
#if defined(PA_HAS_64_BITS_POINTERS)
internal::PartitionAddressSpace::UninitForTesting();
#else
internal::AddressPoolManager::GetInstance().ResetForTesting();
#endif // defined(PA_HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
internal::g_oom_handling_function = nullptr;
}
namespace internal {
template <bool thread_safe>
PartitionAllocator<thread_safe>::~PartitionAllocator() {
MemoryReclaimer::Instance()->UnregisterPartition(&partition_root_);
}
template <bool thread_safe>
void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
#if BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
<< "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
partition_root_.Init(opts);
MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
}
template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);
#if (BUILDFLAG(PA_DCHECK_IS_ON) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
void CheckThatSlotOffsetIsZero(uintptr_t address) {
// Add kPartitionPastAllocationAdjustment, because
// PartitionAllocGetSlotStartInBRPPool will subtract it.
PA_CHECK(PartitionAllocGetSlotStartInBRPPool(
address + kPartitionPastAllocationAdjustment) == address);
}
#endif
} // namespace internal
} // namespace partition_alloc

Some files were not shown because too many files have changed in this diff.