Import chromium-101.0.4951.41

importer 2022-05-02 21:29:43 +08:00 committed by klzgrad
commit 2d8a3e432b
11201 changed files with 2870472 additions and 0 deletions

39
src/.clang-format Normal file

@ -0,0 +1,39 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11
# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"
# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']

58
src/.gitattributes vendored Normal file

@ -0,0 +1,58 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf
# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf
# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.rs text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.toml text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ENG_REVIEW_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf
# Skip Tricium by default on files in third_party.
third_party/** -tricium

168
src/.gn Normal file

@ -0,0 +1,168 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.
import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
# TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
# a warning that "Build argument has no effect". When adding a v8 variable, it
# also needs to be defined in src/ios/BUILD.gn (and removed from both
# locations when it is removed).
v8_extra_library_files = []
v8_experimental_extra_library_files = []
v8_enable_gdbjit = false
v8_imminent_deprecation_warnings = false
# Don't include webrtc's builtin task queue implementation.
rtc_link_task_queue_impl = false
# Don't include the iLBC audio codec.
# TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
# deps on codecs, we can remove this.
rtc_include_ilbc = false
# Changes some setup for the Crashpad build to set them to build against
# Chromium's zlib, base, etc.
crashpad_dependencies = "chromium"
# Override ANGLE's Vulkan dependencies.
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
angle_vulkan_validation_layers_dir =
"//third_party/vulkan-deps/vulkan-validation-layers/src"
# Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 7
devtools_visibility = [ "*" ]
}
# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
"//headless:headless_non_renderer", # 9 errors
"//headless:headless_renderer", # 13 errors
"//headless:headless_shared_sources", # 4 errors
"//headless:headless_shell_browser_lib", # 10 errors
"//headless:headless_shell_lib", # 10 errors
# //v8, https://crbug.com/v8/7330
"//v8/src/inspector:inspector", # 20 errors
"//v8/test/cctest:cctest_sources", # 2 errors
"//v8:cppgc_base", # 1 error
"//v8:v8_internal_headers", # 11 errors
"//v8:v8_libplatform", # 2 errors
]
# This is the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
# enable a different mode. Instead, use GN build args to enable or disable
# functionality and set options. An example is checking for a file in the
# src-internal repo to see if the corresponding src-internal feature should
# be enabled. There are several things that can go wrong with this:
#
# - It's mysterious what causes some things to happen. Although in many cases
# such behavior can be conveniently automatic, GN optimizes for explicit
# and obvious behavior so people can more easily diagnose problems.
#
# - The user can't enable a mode for one build and not another. With GN build
# args, the user can choose the exact configuration of multiple builds
# using one checkout. But by implicitly basing flags on the state of the
# checkout, this functionality is broken.
#
# - It's easy to get stale files. If for example the user edits the gclient
# to stop checking out src-internal (or any other optional thing), it's
# easy to end up with stale files still mysteriously triggering build
# conditions that are no longer appropriate (yes, this happens in real
# life).
#
# - Do not use a script to iterate files in a directory (glob):
#
# - This has the same "stale file" problem as the above discussion. Various
# operations can leave untracked files in the source tree which can cause
# surprising effects.
#
# - It becomes impossible to use "git grep" to find where a certain file is
# referenced. This operation is very common and people really do get
# confused when things aren't listed.
#
# - It's easy to screw up. One common case is a build-time script that packs
# up a directory. The author notices that the script isn't re-run when the
# directory is updated, so adds a glob so all the files are listed as
# inputs. This seems to work great... until a file is deleted. When a
# file is deleted, all the inputs the glob lists will still be up to date
# and no command-lines will have been changed. The action will not be
# re-run and the build will be broken. It is possible to get this correct
# using glob, and it's possible to mess it up without glob, but globs make
# this situation much easier to create. If the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.
"//build_overrides/build.gni",
"//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
# TODO(dgn): Layer violation but breaks the build otherwise, see
# https://crbug.com/474506.
"//clank/java/BUILD.gn",
"//clank/native/BUILD.gn",
"//google_apis/BUILD.gn",
"//printing/BUILD.gn",
"//remoting/host/installer/linux/BUILD.gn",
"//remoting/remoting_version.gni",
"//remoting/host/installer/win/generate_clsids.gni",
"//tools/grit/grit_rule.gni",
"//tools/gritsettings/BUILD.gn",
]

1393
src/AUTHORS Normal file

File diff suppressed because it is too large.

1648
src/BUILD.gn Normal file

File diff suppressed because it is too large.

4657
src/DEPS Normal file

File diff suppressed because it is too large.

27
src/LICENSE Normal file

@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4659
src/base/BUILD.gn Normal file

File diff suppressed because it is too large.

27
src/base/DEPS Normal file

@ -0,0 +1,27 @@
include_rules = [
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
"+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",
"+third_party/perfetto/include",
"+third_party/perfetto/protos/perfetto",
# Conversions between base and Rust types (e.g. base::span <-> rust::Slice)
# require the cxx.h header from cxx. This is only used if Rust is enabled
# in the gn build; see //base/BUILD.gn's conditional dependency on
# //build/rust:cxx_cppdeps.
"+third_party/rust/cxx",
"+third_party/test_fonts",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",
"-url",
# ICU dependencies must be separate from the rest of base.
"-i18n",
# //base/util can use //base but not vice versa.
"-util",
]

3
src/base/DIR_METADATA Normal file

@ -0,0 +1,3 @@
monorail {
component: "Internals>Core"
}

41
src/base/OWNERS Normal file

@ -0,0 +1,41 @@
# See //base/README.md to find qualification for being an owner.
set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
jdoerrie@chromium.org
kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
# per-file rules:
# These are for the common case of adding or renaming files. If you're making
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*
# For Android-specific changes:
per-file ..._android*=file://base/android/OWNERS
# For Fuchsia-specific changes:
per-file ..._fuchsia*=file://build/fuchsia/OWNERS
# For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS
per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS
per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS

144
src/base/PRESUBMIT.py Normal file

@ -0,0 +1,144 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/base.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
def _CheckNoInterfacesInBase(input_api, output_api):
"""Checks to make sure no files in libbase.a have |@interface|."""
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('base/') and
not "/ios/" in f.LocalPath() and
not "/test/" in f.LocalPath() and
not f.LocalPath().endswith('.java') and
not f.LocalPath().endswith('_unittest.mm') and
not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Objective-C interfaces or categories are forbidden in libbase. ' +
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
'browse_thread/thread/efb28c10435987fd',
files) ]
return []
def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
"""Returns locations matching one of the search_regexes."""
def FilterFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
files_to_check=files_to_check,
files_to_skip=files_to_skip)
no_presubmit = r"// no-presubmit-check"
locations = []
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
for search_regex in search_regexes:
if (input_api.re.search(search_regex, line) and
not input_api.re.search(no_presubmit, line)):
locations.append(" %s:%d" % (f.LocalPath(), line_num))
break
return locations
def _CheckNoTraceEventInclude(input_api, output_api):
"""Verify that //base includes base_tracing.h instead of trace event headers.
Checks that files outside trace event implementation include the
base_tracing.h header instead of specific trace event implementation headers
to maintain compatibility with the gn flag "enable_base_tracing = false".
"""
discouraged_includes = [
r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
r'^#include "third_party/perfetto/include/',
]
files_to_check = [
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*[\\/]test[\\/].*",
r".*[\\/]trace_event[\\/].*",
r".*[\\/]tracing[\\/].*",
]
locations = _FindLocations(input_api, discouraged_includes, files_to_check,
files_to_skip)
if locations:
return [ output_api.PresubmitError(
'Base code should include "base/trace_event/base_tracing.h" instead\n' +
'of trace_event implementation headers. If you need to include an\n' +
'implementation header, verify that "gn check" and base_unittests\n' +
'still pass with gn arg "enable_base_tracing = false" and add\n' +
'"// no-presubmit-check" after the include. \n' +
'\n'.join(locations)) ]
return []
def _WarnPbzeroIncludes(input_api, output_api):
"""Warn to check enable_base_tracing=false when including a pbzero header.
Emits a warning when including a perfetto pbzero header, encouraging the
user to verify that //base still builds with enable_base_tracing=false.
"""
warn_includes = [
r'^#include "third_party/perfetto/protos/',
r'^#include "base/tracing/protos/',
]
files_to_check = [
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*[\\/]test[\\/].*",
r".*[\\/]trace_event[\\/].*",
r".*[\\/]tracing[\\/].*",
]
locations = _FindLocations(input_api, warn_includes, files_to_check,
files_to_skip)
if locations:
return [ output_api.PresubmitPromptWarning(
'Please verify that "gn check" and base_unittests still pass with\n' +
'gn arg "enable_base_tracing = false" when adding typed trace\n' +
'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
'to exclude pbzero headers and anything not supported by\n' +
'//base/trace_event/trace_event_stub.h.\n' +
'\n'.join(locations)) ]
return []
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
results.extend(_CheckNoTraceEventInclude(input_api, output_api))
results.extend(_WarnPbzeroIncludes(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results

81
src/base/README.md Normal file

@ -0,0 +1,81 @@
# What is this
Contains a written down set of principles and other information on //base.
Please add to it!
## About //base:
Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.
The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.
Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.
If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.
base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.
## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex c++ stuff
* inclination to always ask why and understand everything (including external
interactions like win32) rather than just hoping the author did it right
* mentorship/experience
* demonstrated good judgement (esp with regards to public APIs) over a length
of time
Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.
## Design and naming
* Be sure to use the base namespace.
* STL-like constructs should adhere as closely to STL as possible. Functions
and behaviors not present in STL should only be added when they are related
to the specific data structure implemented by the container.
* For STL-like constructs our policy is that they should use STL-like naming
even when it may conflict with the style guide. So functions and class names
should be lower case with underscores. Non-STL-like classes and functions
should use Google naming (see the sketch below).
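A rough illustration of that split, using hypothetical declarations rather than
real //base interfaces:
```
#include <cstddef>

namespace base {

// STL-like container: STL-style names (lower_case_with_underscores),
// mirroring the std:: interface it imitates.
template <typename T>
class example_flat_set {
 public:
  void insert(const T& value);
  std::size_t size() const;
  bool empty() const;
};

// Non-STL-like class: regular Google C++ style names.
class ExampleWatcher {
 public:
  bool StartWatching();
  void StopWatching();
};

}  // namespace base
```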
## Performance testing
Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:
* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
queue in isolation.
* ConditionVariablePerfTest: Measures thread switching cost of condition
variables.
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
machinery.
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
* MessageLoopPerfTest: Measures the speed of task posting in various
configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
pthreads.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
multithreaded scenarios.
Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes or 3) changes in //base code
itself.

13
src/base/SECURITY_OWNERS Normal file

@ -0,0 +1,13 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base; OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org

90
src/base/allocator/BUILD.gn Normal file

@ -0,0 +1,90 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//base/allocator/allocator.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
buildflag_header("buildflags") {
header = "buildflags.h"
_use_partition_alloc_as_malloc = use_allocator == "partition"
assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
"Partition alloc requires the allocator shim")
# BackupRefPtr(BRP) build flags.
_use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
_put_ref_count_in_previous_slot =
put_ref_count_in_previous_slot && _use_backup_ref_ptr
_enable_backup_ref_ptr_slow_checks =
enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
_enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks && _use_backup_ref_ptr
_record_alloc_info = false
flags = [
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",
"USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
"USE_FAKE_BINARY_EXPERIMENT=$use_fake_binary_experiment",
"RECORD_ALLOC_INFO=$_record_alloc_info",
]
}
if (is_apple) {
source_set("early_zone_registration_mac") {
sources = [
"early_zone_registration_mac.cc",
"early_zone_registration_mac.h",
]
deps = [ ":buildflags" ]
}
}
# Used to shim malloc symbols on Android. See //base/allocator/README.md.
config("wrap_malloc_symbols") {
ldflags = [
"-Wl,-wrap,calloc",
"-Wl,-wrap,free",
"-Wl,-wrap,malloc",
"-Wl,-wrap,memalign",
"-Wl,-wrap,posix_memalign",
"-Wl,-wrap,pvalloc",
"-Wl,-wrap,realloc",
"-Wl,-wrap,valloc",
# <stdlib.h> functions
"-Wl,-wrap,realpath",
# <string.h> functions
"-Wl,-wrap,strdup",
"-Wl,-wrap,strndup",
# <unistd.h> functions
"-Wl,-wrap,getcwd",
# <stdio.h> functions
"-Wl,-wrap,asprintf",
"-Wl,-wrap,vasprintf",
]
}
config("mac_no_default_new_delete_symbols") {
if (!is_component_build) {
# This is already set when we compile libc++, see
# buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as well,
# since the shim defines the symbols, to prevent them being exported.
cflags = [ "-fvisibility-global-new-delete-hidden" ]
}
}

3
src/base/allocator/DIR_METADATA Normal file

@ -0,0 +1,3 @@
monorail {
component: "Internals"
}

8
src/base/allocator/OWNERS Normal file

@ -0,0 +1,8 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org
per-file allocator.gni=bartekn@chromium.org
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org
per-file partition_alloc*=bartekn@chromium.org
per-file BUILD.gn=bartekn@chromium.org

172
src/base/allocator/README.md Normal file

@ -0,0 +1,172 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.
Bare in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
- Large parts of the renderer (Blink) use two home-brewed allocators,
PartitionAlloc and BlinkGC (Oilpan).
- Some subsystems, such as the V8 JavaScript engine, handle memory management
autonomously.
- Various parts of the codebase use abstractions such as `SharedMemory` or
`DiscardableMemory` which, similarly to the above, have their own page-level
memory management.
Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra hooks which service calls to malloc/new. The relevant
build-time flags involved are `use_allocator` and `use_allocator_shim`.
The default choices are as follows:
**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.
**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
In addition, when building for `asan` / `msan` both the allocator and the shim
layer are disabled.
Layering and build deps
-----------------------
The `allocator` target provides the linker flags required for the Windows shim
layer. The `base` target is (almost) the only one depending on `allocator`. No
other targets should depend on it, with the exception of the very few
executables / dynamic libraries that don't depend, either directly or
indirectly, on `base` within the scope of a linker unit.
More importantly, **no other place outside of `/base` should depend on the
specific allocator**.
If such a functional dependency is required, it should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`).
**Why does `base` depend on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.
Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) automatically get the correct
set of linker flags to pull in the Windows shim-layer (if needed).
Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.
Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed at unifying the symbol definition and allocator routing logic in
a central place.
- Full documentation: [Allocator shim design doc][url-allocator-shim].
- Current state: Available and enabled by default on Android, CrOS, Linux,
Mac OS and Windows.
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
- Build-time flag: `use_allocator_shim`.
**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+ +-----------------------+ +----------------+
| malloc & friends | -> | shim layer | -> | Routing to |
| symbols definition | | implementation | | allocator |
+-------------------------+ +-----------------------+ +----------------+
| - libc symbols (malloc, | | - Security checks | | - glibc |
| calloc, free, ...) | | - Chain of dispatchers| | - Android |
| - C++ symbols (operator | | that can intercept | | bionic |
| new, delete, ...) | | and override | | - WinHeap |
| - glibc weak symbols | | allocations | | - Partition |
| (__libc_malloc, ...) | +-----------------------+ | Alloc |
+-------------------------+ +----------------+
```
**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.
*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols that we
can override in `allocator_shim_override_ucrt_symbols_win.h`.
*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third-party libraries. Symbol resolution on Linux is a
breadth-first search that starts from the root link unit, that is, the
executable (see Executable and Linkable Format (ELF) - Portable Formats
Specification).
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).
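As a rough, glibc-specific sketch of what this interposition amounts to
(illustrative only; the real definitions live in the `allocator_shim_override_*`
headers and route into the shim):
```
#include <stddef.h>

// glibc also exposes its implementation as __libc_malloc (one of the weak
// symbols mentioned in the diagram above), so an interposed malloc can
// forward to it after doing its own work.
extern "C" void* __libc_malloc(size_t size);

// A strong, exported malloc in the root link unit wins symbol resolution
// over libc.so's definition for the executable and its shared libraries.
extern "C" void* malloc(size_t size) {
  // A real shim would run security checks / the dispatcher chain here.
  return __libc_malloc(size);
}
```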
*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case the approach is instead to wrap symbol resolution at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag (see the
sketch below).
The use of this wrapping flag causes:
- All references to allocator symbols in the Chrome codebase to be rewritten as
references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
route allocator calls inside the shim layer.
- The references to the original `malloc` symbols (which are typically defined
by the system's libc.so) are accessible via the special `__real_malloc` and
friends symbols (which will be relocated, at load time, against `malloc`).
In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
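A minimal sketch of what the wrapped symbols boil down to (illustrative only,
not the actual contents of `allocator_shim_override_linker_wrapped_symbols.h`):
```
#include <stddef.h>

// With -Wl,-wrap,malloc the linker resolves __real_malloc to the original
// malloc (libc's, relocated at load time) ...
extern "C" void* __real_malloc(size_t size);

// ... and rewrites every malloc call site in the wrapped link unit to call
// __wrap_malloc instead.
extern "C" void* __wrap_malloc(size_t size) {
  // A real shim would route this through its dispatcher chain first.
  return __real_malloc(size);
}
```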
**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
(using the `InsertAllocatorDispatch` API). They can intercept and override
allocator calls (see the sketch after this list).
- The security checks (suicide on malloc-failure via `std::new_handler`, etc.).
This happens inside `allocator_shim.cc`.
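To make the dispatcher chain concrete, here is a standalone sketch of the
pattern using simplified, invented types; the real `AllocatorDispatch` struct
declared alongside `InsertAllocatorDispatch` has many more entry points
(calloc, realloc, aligned variants, ...) and different signatures:
```
#include <cstddef>
#include <cstdlib>

// Simplified stand-in for a dispatcher: each hook receives the dispatcher
// itself so it can forward to the next link in the chain.
struct ExampleDispatch {
  void* (*alloc)(const ExampleDispatch* self, std::size_t size);
  void (*free)(const ExampleDispatch* self, void* ptr);
  const ExampleDispatch* next;
};

// Terminal dispatcher: routes to the actual allocator (here, the system one).
void* SystemAlloc(const ExampleDispatch*, std::size_t size) {
  return std::malloc(size);
}
void SystemFree(const ExampleDispatch*, void* ptr) {
  std::free(ptr);
}
ExampleDispatch g_system_dispatch = {&SystemAlloc, &SystemFree, nullptr};

// An interposed dispatcher that counts allocations before forwarding.
std::size_t g_alloc_count = 0;
void* CountingAlloc(const ExampleDispatch* self, std::size_t size) {
  ++g_alloc_count;
  return self->next->alloc(self->next, size);
}
void CountingFree(const ExampleDispatch* self, void* ptr) {
  self->next->free(self->next, ptr);
}
ExampleDispatch g_counting_dispatch = {&CountingAlloc, &CountingFree,
                                       &g_system_dispatch};

// The shimmed malloc/new symbols would consult the head of the chain.
const ExampleDispatch* g_chain_head = &g_counting_dispatch;
void* ShimmedMalloc(std::size_t size) {
  return g_chain_head->alloc(g_chain_head, size);
}
```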
**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.
Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing

145
src/base/allocator/allocator.gni Normal file

@ -0,0 +1,145 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/chromecast_build.gni")
import("//build/config/sanitizers/sanitizers.gni")
if (is_ios) {
import("//build/config/ios/ios_sdk.gni")
}
# Sanitizers replace the allocator, don't use our own.
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan
# - Component build support is disabled on all platforms. It is known to cause
# issues on some (e.g. Windows with shims, Android with non-universal symbol
# wrapping), and has not been validated on others.
# - Windows: debug CRT is not compatible, see below.
_disable_partition_alloc = is_component_build || (is_win && is_debug)
# - NaCl: No plans to support it.
# - iOS: not done yet.
_is_partition_alloc_platform = !is_nacl && !is_ios
# Under Windows Debug the allocator shim is not compatible with CRT.
# NaCl in particular does seem to link some binaries statically
# against the debug CRT with "is_nacl=false".
# Under Fuchsia the allocator shim is only required for PA-E.
# For all other platforms & configurations, the shim is required, to replace
# the default system allocators, e.g. with Partition Alloc.
if ((is_linux || is_chromeos || is_android || is_apple ||
(is_fuchsia && !_disable_partition_alloc) ||
(is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
_default_use_allocator_shim = true
} else {
_default_use_allocator_shim = false
}
if (_default_use_allocator_shim && _is_partition_alloc_platform &&
!_disable_partition_alloc) {
_default_allocator = "partition"
} else {
_default_allocator = "none"
}
declare_args() {
# Memory allocator to use. Set to "none" to use default allocator.
use_allocator = _default_allocator
# Causes all the allocations to be routed via allocator_shim.cc.
use_allocator_shim = _default_use_allocator_shim
# Whether PartitionAlloc should be available for use or not.
# true makes PartitionAlloc linked to the executable or shared library and
# makes it available for use. It doesn't mean that the default allocator
# is PartitionAlloc, which is governed by |use_allocator|.
#
# This flag is currently set to false only on Cronet bots, because Cronet
# doesn't use PartitionAlloc at all, and doesn't wish to incur the library
# size increase (crbug.com/674570).
use_partition_alloc = true
}
if (!use_partition_alloc && use_allocator == "partition") {
# If there is a conflict, prioritize |use_partition_alloc| over
# |use_allocator|.
use_allocator = "none"
}
assert(use_allocator == "none" || use_allocator == "partition")
assert(
!use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
is_fuchsia || is_apple,
"use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
"and Windows.")
if (is_win && use_allocator_shim) {
# TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
assert(!is_component_build,
"The allocator shim doesn't work for the component build on Windows.")
}
_is_brp_supported = (is_win || is_android) && use_allocator == "partition"
declare_args() {
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
# of raw_ptr<T>, and enable PartitionAlloc support for it.
use_backup_ref_ptr = _is_brp_supported
}
declare_args() {
# If BRP is enabled, additional options are available:
# - put_ref_count_in_previous_slot: place the ref-count at the end of the
# previous slot (or in metadata if a slot starts on the page boundary), as
# opposed to the beginning of the slot.
# - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
# are too expensive to have on by default.
# - enable_dangling_raw_ptr_checks: enable checking raw_ptr do not become
# dangling during their lifetime.
put_ref_count_in_previous_slot = false
enable_backup_ref_ptr_slow_checks = false
enable_dangling_raw_ptr_checks = false
# Registers the binary for a fake binary A/B experiment. The binaries built
# with this flag have no behavior difference, except for setting a synthetic
# Finch trial.
use_fake_binary_experiment = false
use_asan_backup_ref_ptr = false
}
# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
# In theory, such a configuration is possible, but its scope would be limited to
# only Blink partitions, which is currently not tested. Better to trigger an
# error, than have BackupRefPtr silently disabled while believing it is enabled.
if (!is_nacl) {
assert(!use_backup_ref_ptr || use_allocator == "partition",
"Can't use BackupRefPtr without PartitionAlloc-Everywhere")
}
# put_ref_count_in_previous_slot can only be used if use_backup_ref_ptr
# is true.
assert(
use_backup_ref_ptr || !put_ref_count_in_previous_slot,
"Can't put ref count in the previous slot if BackupRefPtr isn't enabled at all")
# enable_backup_ref_ptr_slow_checks can only be used if use_backup_ref_ptr
# is true.
assert(use_backup_ref_ptr || !enable_backup_ref_ptr_slow_checks,
"Can't enable additional BackupRefPtr checks if it isn't enabled at all")
# enable_dangling_raw_ptr_checks can only be used if use_backup_ref_ptr
# is true.
assert(
use_backup_ref_ptr || !enable_dangling_raw_ptr_checks,
"Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")
# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
assert(
!use_backup_ref_ptr || !use_asan_backup_ref_ptr,
"Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")
assert(!use_asan_backup_ref_ptr || is_asan,
"AsanBackupRefPtr requires AddressSanitizer")

40
src/base/allocator/allocator_check.cc Normal file

@ -0,0 +1,40 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_check.h"
#include "base/allocator/buildflags.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include "base/allocator/winheap_stubs_win.h"
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/allocator_interception_mac.h"
#endif
namespace base {
namespace allocator {
bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
// Set by allocator_shim_override_ucrt_symbols_win.h when the
// shimmed _set_new_mode() is called.
return g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
!BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// From allocator_interception_mac.mm.
return base::allocator::g_replaced_default_zone;
#else
return true;
#endif
}
} // namespace allocator
} // namespace base

18
src/base/allocator/allocator_check.h Normal file

@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#include "base/base_export.h"
namespace base {
namespace allocator {
BASE_EXPORT bool IsAllocatorInitialized();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

15
src/base/allocator/allocator_extension.cc Normal file

@ -0,0 +1,15 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"
namespace base {
namespace allocator {
void ReleaseFreeMemory() {}
} // namespace allocator
} // namespace base

23
src/base/allocator/allocator_extension.h Normal file

@ -0,0 +1,23 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#include <stddef.h> // for size_t
#include "base/base_export.h"
#include "build/build_config.h"
namespace base {
namespace allocator {
// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

65
src/base/allocator/allocator_interception_mac.h Normal file

@ -0,0 +1,65 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#include <stddef.h>
#include "base/base_export.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
struct MallocZoneFunctions;
// This initializes AllocatorDispatch::default_dispatch by saving pointers to
// the functions in the current default malloc zone. This must be called before
// the default malloc zone is changed to have its intended effect.
void InitializeDefaultDispatchToMacAllocator();
// Saves the function pointers currently used by the default zone.
void StoreFunctionsForDefaultZone();
// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
void StoreFunctionsForAllZones();
// For all malloc zones that have been stored, replace their functions with
// |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);
extern bool g_replaced_default_zone;
// Calls the original implementation of malloc/calloc prior to interception.
bool UncheckedMallocMac(size_t size, void** result);
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
// Intercepts calls to default and purgeable malloc zones. Intercepts Core
// Foundation and Objective-C allocations.
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();
// Updates all malloc zones to use their original functions.
// Also calls ClearAllMallocZonesForTesting.
BASE_EXPORT void UninterceptMallocZonesForTesting();
// Returns true if allocations are successfully being intercepted for all malloc
// zones.
bool AreMallocZonesIntercepted();
// Periodically checks for, and shims new malloc zones. Stops checking after 1
// minute.
BASE_EXPORT void PeriodicallyShimNewMallocZones();
// Exposed for testing.
BASE_EXPORT void ShimNewMallocZones();
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
const MallocZoneFunctions* functions);
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
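// A hypothetical call site for the UncheckedMallocMac() helper declared above
// (illustrative only): code that wants an allocation to be allowed to fail,
// rather than hit the OOM killer installed by InterceptAllocationsMac(), could
// look like this.
#include <cstdlib>

#include "base/allocator/allocator_interception_mac.h"

void TryAllocateScratchBuffer() {
  void* buffer = nullptr;
  // Returns false (leaving |buffer| null) instead of terminating on OOM.
  if (base::allocator::UncheckedMallocMac(64 * 1024, &buffer)) {
    // ... use the 64 KiB scratch buffer ...
    std::free(buffer);
  }
}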

613
src/base/allocator/allocator_interception_mac.mm Normal file

@ -0,0 +1,613 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains all the logic necessary to intercept allocations on
// macOS. "malloc zones" are an abstraction that allows the process to intercept
// all malloc-related functions. There is no good mechanism [short of
// interposition] to determine when new malloc zones are added, so there's no clean
// mechanism to intercept all malloc zones. This file contains logic to
// intercept the default and purgeable zones, which always exist. A cursory
// review of Chrome seems to imply that non-default zones are almost never used.
//
// This file also contains logic to intercept Core Foundation and Objective-C
// allocations. The implementations forward to the default malloc zone, so the
// only reason to intercept these calls is to re-label OOM crashes with slightly
// more details.
#include "base/allocator/allocator_interception_mac.h"
#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <errno.h>
#include <mach/mach.h>
#import <objc/runtime.h>
#include <stddef.h>
#include <new>
#include "base/allocator/buildflags.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "base/bind.h"
#include "base/bits.h"
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/process/memory.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"
#if BUILDFLAG(IS_IOS)
#include "base/ios/ios_util.h"
#else
#include "base/mac/mac_util.h"
#endif
namespace base {
namespace allocator {
bool g_replaced_default_zone = false;
namespace {
bool g_oom_killer_enabled;
bool g_allocator_shims_failed_to_install;
// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
//
// Returns true if the malloc zone was properly de-protected, or false
// otherwise. If this function returns false, the out parameters are invalid and
// the region does not need to be re-protected.
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
vm_address_t* reprotection_start,
vm_size_t* reprotection_length,
vm_prot_t* reprotection_value) {
mach_port_t unused;
*reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
struct vm_region_basic_info_64 info;
mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
kern_return_t result =
vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
VM_REGION_BASIC_INFO_64,
reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
if (result != KERN_SUCCESS) {
MACH_LOG(ERROR, result) << "vm_region_64";
return false;
}
// The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
// balance it with a deallocate in case this ever changes. See
// the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
// https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
mach_port_deallocate(mach_task_self(), unused);
if (!(info.max_protection & VM_PROT_WRITE)) {
LOG(ERROR) << "Invalid max_protection " << info.max_protection;
return false;
}
// Does the region fully enclose the zone pointers? Possibly unwarranted
// simplification used: using the size of a full version 10 malloc zone rather
// than the actual smaller size if the passed-in zone is not version 10.
DCHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
reinterpret_cast<vm_address_t>(*reprotection_start);
DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
if (info.protection & VM_PROT_WRITE) {
// No change needed; the zone is already writable.
*reprotection_start = 0;
*reprotection_length = 0;
*reprotection_value = VM_PROT_NONE;
} else {
*reprotection_value = info.protection;
result =
vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
false, info.protection | VM_PROT_WRITE);
if (result != KERN_SUCCESS) {
MACH_LOG(ERROR, result) << "vm_protect";
return false;
}
}
return true;
}
#if !defined(ADDRESS_SANITIZER)
MallocZoneFunctions g_old_zone;
MallocZoneFunctions g_old_purgeable_zone;
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
void* result = g_old_zone.malloc(zone, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void* oom_killer_calloc(struct _malloc_zone_t* zone,
size_t num_items,
size_t size) {
void* result = g_old_zone.calloc(zone, num_items, size);
if (!result && num_items && size)
TerminateBecauseOutOfMemory(num_items * size);
return result;
}
void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
void* result = g_old_zone.valloc(zone, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
g_old_zone.free(zone, ptr);
}
void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
void* result = g_old_zone.realloc(zone, ptr, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void* oom_killer_memalign(struct _malloc_zone_t* zone,
size_t alignment,
size_t size) {
void* result = g_old_zone.memalign(zone, alignment, size);
// Only die if posix_memalign would have returned ENOMEM, since there are
// other reasons why null might be returned. See posix_memalign() in 10.15's
// https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
if (!result && size && alignment >= sizeof(void*) &&
base::bits::IsPowerOfTwo(alignment)) {
TerminateBecauseOutOfMemory(size);
}
return result;
}
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
void* result = g_old_purgeable_zone.malloc(zone, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
size_t num_items,
size_t size) {
void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
if (!result && num_items && size)
TerminateBecauseOutOfMemory(num_items * size);
return result;
}
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
void* result = g_old_purgeable_zone.valloc(zone, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
g_old_purgeable_zone.free(zone, ptr);
}
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
void* ptr,
size_t size) {
void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
if (!result && size)
TerminateBecauseOutOfMemory(size);
return result;
}
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
size_t alignment,
size_t size) {
void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
// Only die if posix_memalign would have returned ENOMEM, since there are
// other reasons why null might be returned. See posix_memalign() in 10.15's
// https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
if (!result && size && alignment >= sizeof(void*) &&
base::bits::IsPowerOfTwo(alignment)) {
TerminateBecauseOutOfMemory(size);
}
return result;
}
#endif // !defined(ADDRESS_SANITIZER)
#if !defined(ADDRESS_SANITIZER)
// === Core Foundation CFAllocators ===
bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
return !base::ios::IsRunningOnOrLater(16, 0, 0);
#else
return !base::mac::IsOSLaterThan12_DontCallThis();
#endif
}
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
return &our_allocator->_context;
}
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
CFOptionFlags hint,
void* info) {
void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
CFOptionFlags hint,
void* info) {
void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
CFOptionFlags hint,
void* info) {
void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
#endif // !defined(ADDRESS_SANITIZER)
// === Cocoa NSObject allocation ===
typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;
id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
id result = g_old_allocWithZone(self, _cmd, zone);
if (!result)
TerminateBecauseOutOfMemory(0);
return result;
}
void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
if (!IsMallocZoneAlreadyStored(chrome_zone))
return;
MallocZoneFunctions& functions = GetFunctionsForZone(zone);
ReplaceZoneFunctions(chrome_zone, &functions);
}
} // namespace
bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
*result = malloc(size);
#else
if (g_old_zone.malloc) {
*result = g_old_zone.malloc(malloc_default_zone(), size);
} else {
*result = malloc(size);
}
#endif // defined(ADDRESS_SANITIZER)
return *result != NULL;
}
bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
*result = calloc(num_items, size);
#else
if (g_old_zone.calloc) {
*result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
} else {
*result = calloc(num_items, size);
}
#endif // defined(ADDRESS_SANITIZER)
return *result != NULL;
}
void InitializeDefaultDispatchToMacAllocator() {
StoreFunctionsForAllZones();
}
void StoreFunctionsForDefaultZone() {
ChromeMallocZone* default_zone = reinterpret_cast<ChromeMallocZone*>(
malloc_default_zone());
StoreMallocZone(default_zone);
}
void StoreFunctionsForAllZones() {
// This ensures that the default zone is always at the front of the array,
// which is important for performance.
StoreFunctionsForDefaultZone();
vm_address_t* zones;
unsigned int count;
kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
if (kr != KERN_SUCCESS)
return;
for (unsigned int i = 0; i < count; ++i) {
ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
StoreMallocZone(zone);
}
}
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
// The default zone does not get returned in malloc_get_all_zones().
ChromeMallocZone* default_zone =
reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
ReplaceZoneFunctions(default_zone, functions);
}
vm_address_t* zones;
unsigned int count;
kern_return_t kr =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
if (kr != KERN_SUCCESS)
return;
for (unsigned int i = 0; i < count; ++i) {
ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
if (DoesMallocZoneNeedReplacing(zone, functions)) {
ReplaceZoneFunctions(zone, functions);
}
}
g_replaced_default_zone = true;
}
void InterceptAllocationsMac() {
if (g_oom_killer_enabled)
return;
g_oom_killer_enabled = true;
// === C malloc/calloc/valloc/realloc/posix_memalign ===
// This approach is not perfect, as requests for amounts of memory larger than
// MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will still
// fail with a NULL rather than dying (see malloc_zone_malloc() in
// https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c for
// details). Unfortunately, it's the best we can do. Also note that this does
// not affect allocations from non-default zones.
#if !defined(ADDRESS_SANITIZER)
// Don't do anything special on OOM for the malloc zones replaced by
// AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// The malloc zone backed by PartitionAlloc crashes by default, so there is
// no need to install the OOM killer.
ChromeMallocZone* default_zone =
reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
if (!IsMallocZoneAlreadyStored(default_zone)) {
StoreZoneFunctions(default_zone, &g_old_zone);
MallocZoneFunctions new_functions = {};
new_functions.malloc = oom_killer_malloc;
new_functions.calloc = oom_killer_calloc;
new_functions.valloc = oom_killer_valloc;
new_functions.free = oom_killer_free;
new_functions.realloc = oom_killer_realloc;
new_functions.memalign = oom_killer_memalign;
ReplaceZoneFunctions(default_zone, &new_functions);
g_replaced_default_zone = true;
}
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
ChromeMallocZone* purgeable_zone =
reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
MallocZoneFunctions new_functions = {};
new_functions.malloc = oom_killer_malloc_purgeable;
new_functions.calloc = oom_killer_calloc_purgeable;
new_functions.valloc = oom_killer_valloc_purgeable;
new_functions.free = oom_killer_free_purgeable;
new_functions.realloc = oom_killer_realloc_purgeable;
new_functions.memalign = oom_killer_memalign_purgeable;
ReplaceZoneFunctions(purgeable_zone, &new_functions);
}
#endif  // !defined(ADDRESS_SANITIZER)
// === C malloc_zone_batch_malloc ===
// batch_malloc is omitted because the default malloc zone's implementation
// only supports batch_malloc for "tiny" allocations from the free list. It
// will fail for allocations larger than "tiny", and will only allocate as
// many blocks as it's able to from the free list. These factors mean that it
// can return less than the requested memory even in a non-out-of-memory
// situation. There's no good way to detect whether a batch_malloc failure is
// due to these other factors, or due to genuine memory or address space
// exhaustion. The fact that it only allocates space from the "tiny" free list
// means that it's likely that a failure will not be due to memory exhaustion.
// Similarly, these constraints on batch_malloc mean that callers must always
// be expecting to receive less memory than was requested, even in situations
// where memory pressure is not a concern. Finally, the only public interface
// to batch_malloc is malloc_zone_batch_malloc, which is specific to the
// system's malloc implementation. It's unlikely that anyone's even heard of
// it.
#if !defined(ADDRESS_SANITIZER)
// === Core Foundation CFAllocators ===
// This will not catch allocation done by custom allocators, but will catch
// all allocation done by system-provided ones.
CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
!g_old_cfallocator_malloc_zone)
<< "Old allocators unexpectedly non-null";
bool cf_allocator_internals_known = CanGetContextForCFAllocator();
if (cf_allocator_internals_known) {
CFAllocatorContext* context =
ContextForCFAllocator(kCFAllocatorSystemDefault);
CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
g_old_cfallocator_system_default = context->allocate;
CHECK(g_old_cfallocator_system_default)
<< "Failed to get kCFAllocatorSystemDefault allocation function.";
context->allocate = oom_killer_cfallocator_system_default;
context = ContextForCFAllocator(kCFAllocatorMalloc);
CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
g_old_cfallocator_malloc = context->allocate;
CHECK(g_old_cfallocator_malloc)
<< "Failed to get kCFAllocatorMalloc allocation function.";
context->allocate = oom_killer_cfallocator_malloc;
context = ContextForCFAllocator(kCFAllocatorMallocZone);
CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
g_old_cfallocator_malloc_zone = context->allocate;
CHECK(g_old_cfallocator_malloc_zone)
<< "Failed to get kCFAllocatorMallocZone allocation function.";
context->allocate = oom_killer_cfallocator_malloc_zone;
} else {
DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
"failures via CFAllocator will not result in termination. "
"http://crbug.com/45650";
}
#endif  // !defined(ADDRESS_SANITIZER)
// === Cocoa NSObject allocation ===
// Note that both +[NSObject new] and +[NSObject alloc] call through to
// +[NSObject allocWithZone:].
CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
Class nsobject_class = [NSObject class];
Method orig_method =
class_getClassMethod(nsobject_class, @selector(allocWithZone:));
g_old_allocWithZone =
reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
CHECK(g_old_allocWithZone)
<< "Failed to get allocWithZone allocation function.";
method_setImplementation(orig_method,
reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
void UninterceptMallocZonesForTesting() {
UninterceptMallocZoneForTesting(malloc_default_zone());
vm_address_t* zones;
unsigned int count;
kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
CHECK(kr == KERN_SUCCESS);
for (unsigned int i = 0; i < count; ++i) {
UninterceptMallocZoneForTesting(
reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
}
ClearAllMallocZonesForTesting();
}
bool AreMallocZonesIntercepted() {
return !g_allocator_shims_failed_to_install;
}
namespace {
void ShimNewMallocZonesAndReschedule(base::Time end_time,
base::TimeDelta delay) {
ShimNewMallocZones();
if (base::Time::Now() > end_time)
return;
base::TimeDelta next_delay = delay * 2;
SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
delay);
}
} // namespace
void PeriodicallyShimNewMallocZones() {
base::Time end_time = base::Time::Now() + base::Minutes(1);
base::TimeDelta initial_delay = base::Seconds(1);
ShimNewMallocZonesAndReschedule(end_time, initial_delay);
}
void ShimNewMallocZones() {
StoreFunctionsForAllZones();
// Use the functions for the default zone as a template to replace those of
// the new zones.
ChromeMallocZone* default_zone =
reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
DCHECK(IsMallocZoneAlreadyStored(default_zone));
MallocZoneFunctions new_functions;
StoreZoneFunctions(default_zone, &new_functions);
ReplaceFunctionsForStoredZones(&new_functions);
}
void ReplaceZoneFunctions(ChromeMallocZone* zone,
const MallocZoneFunctions* functions) {
// Remove protection.
vm_address_t reprotection_start = 0;
vm_size_t reprotection_length = 0;
vm_prot_t reprotection_value = VM_PROT_NONE;
bool success = DeprotectMallocZone(zone, &reprotection_start,
&reprotection_length, &reprotection_value);
if (!success) {
g_allocator_shims_failed_to_install = true;
return;
}
CHECK(functions->malloc && functions->calloc && functions->valloc &&
functions->free && functions->realloc);
zone->malloc = functions->malloc;
zone->calloc = functions->calloc;
zone->valloc = functions->valloc;
zone->free = functions->free;
zone->realloc = functions->realloc;
if (functions->batch_malloc)
zone->batch_malloc = functions->batch_malloc;
if (functions->batch_free)
zone->batch_free = functions->batch_free;
if (functions->size)
zone->size = functions->size;
if (zone->version >= 5 && functions->memalign) {
zone->memalign = functions->memalign;
}
if (zone->version >= 6 && functions->free_definite_size) {
zone->free_definite_size = functions->free_definite_size;
}
// Restore protection if it was active.
if (reprotection_start) {
kern_return_t result =
vm_protect(mach_task_self(), reprotection_start, reprotection_length,
false, reprotection_value);
MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
}
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,418 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include <errno.h>
#include <atomic>
#include <new>
#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif
#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>
#include "base/allocator/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif
// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.
namespace {
std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
&base::allocator::AllocatorDispatch::default_dispatch};
bool g_call_new_handler_on_malloc_failure = false;
ALWAYS_INLINE size_t GetCachedPageSize() {
static size_t pagesize = 0;
if (!pagesize)
pagesize = base::GetPageSize();
return pagesize;
}
// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
return base::allocator::WinCallNewHandler(size);
#else
std::new_handler nh = std::get_new_handler();
if (!nh)
return false;
(*nh)();
// Assume the new_handler will abort if it fails. Exceptions are disabled and
// we don't support the case of a new_handler throwing std::bad_alloc.
return true;
#endif
}
ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
return g_chain_head.load(std::memory_order_relaxed);
}
} // namespace
namespace base {
namespace allocator {
void SetCallNewHandlerOnMallocFailure(bool value) {
g_call_new_handler_on_malloc_failure = value;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(value);
#endif
}
void* UncheckedAlloc(size_t size) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}
void UncheckedFree(void* ptr) {
const allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->free_function(chain_head, ptr, nullptr);
}
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
// Loop in case of (an unlikely) race on setting the list head.
size_t kMaxRetries = 7;
for (size_t i = 0; i < kMaxRetries; ++i) {
const AllocatorDispatch* chain_head = GetChainHead();
dispatch->next = chain_head;
// This function is guaranteed to be thread-safe w.r.t. concurrent
// insertions. It also has to guarantee that all the threads always
// see a consistent chain, hence the atomic_thread_fence() below.
// InsertAllocatorDispatch() is NOT a fast path, as opposed to malloc(), so
// we don't really want this to be a release-store with a corresponding
// acquire-load during malloc().
std::atomic_thread_fence(std::memory_order_seq_cst);
// Set the chain head to the new dispatch atomically. If we lose the race,
// retry.
if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
std::memory_order_relaxed,
std::memory_order_relaxed)) {
// Success.
return;
}
}
CHECK(false); // Too many retries, this shouldn't happen.
}
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
DCHECK_EQ(GetChainHead(), dispatch);
g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}
} // namespace allocator
} // namespace base
// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {
// The general pattern for allocations is:
// - Try to allocate; if it succeeds, return the pointer.
// - If the allocation failed:
// - Call the std::new_handler if it was a C++ allocation.
// - Call the std::new_handler if it was a malloc() (or calloc() or similar)
// AND SetCallNewHandlerOnMallocFailure(true).
// - If the std::new_handler is NOT set just return nullptr.
// - If the std::new_handler is set:
// - Assume it will abort() if it fails (very likely the new_handler will
//     just terminate the process after printing a message).
// - Assume it did succeed if it returns, in which case reattempt the alloc.
ALWAYS_INLINE void* ShimCppNew(size_t size) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
ptr = chain_head->alloc_function(chain_head, size, context);
} while (!ptr && CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->alloc_unchecked_function(chain_head, size, context);
}
ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
context);
} while (!ptr && CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void ShimCppDelete(void* address) {
void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
context = malloc_default_zone();
#endif
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->free_function(chain_head, address, context);
}
ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_function(chain_head, size, context);
} while (!ptr && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
context);
} while (!ptr && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
// realloc(size == 0) means free() and might return a nullptr. We should
// not call the std::new_handler in that case, though.
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
ptr = chain_head->realloc_function(chain_head, address, size, context);
} while (!ptr && size && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr;
do {
ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
context);
} while (!ptr && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
// posix_memalign is supposed to check the arguments. See tc_posix_memalign()
// in tc_malloc.cc.
if (((alignment % sizeof(void*)) != 0) ||
!base::bits::IsPowerOfTwo(alignment)) {
return EINVAL;
}
void* ptr = ShimMemalign(alignment, size, nullptr);
*res = ptr;
return ptr ? 0 : ENOMEM;
}
ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
return ShimMemalign(GetCachedPageSize(), size, context);
}
ALWAYS_INLINE void* ShimPvalloc(size_t size) {
// pvalloc(0) should allocate one page, according to its man page.
if (size == 0) {
size = GetCachedPageSize();
} else {
size = base::bits::AlignUp(size, GetCachedPageSize());
}
// The third argument is nullptr because pvalloc is glibc only and does not
// exist on OSX/BSD systems.
return ShimMemalign(GetCachedPageSize(), size, nullptr);
}
ALWAYS_INLINE void ShimFree(void* address, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->free_function(chain_head, address, context);
}
ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->get_size_estimate_function(
chain_head, const_cast<void*>(address), context);
}
ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
void** results,
unsigned num_requested,
void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->batch_malloc_function(chain_head, size, results,
num_requested, context);
}
ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->batch_free_function(chain_head, to_be_freed,
num_to_be_freed, context);
}
ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->free_definite_size_function(chain_head, ptr, size,
context);
}
ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
size_t alignment,
void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr = nullptr;
do {
ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
context);
} while (!ptr && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
size_t size,
size_t alignment,
void* context) {
// _aligned_realloc(size == 0) means _aligned_free() and might return a
// nullptr. We should not call the std::new_handler in that case, though.
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
void* ptr = nullptr;
do {
ptr = chain_head->aligned_realloc_function(chain_head, address, size,
alignment, context);
} while (!ptr && size && g_call_new_handler_on_malloc_failure &&
CallNewHandler(size));
return ptr;
}
ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
return chain_head->aligned_free_function(chain_head, address, context);
}
} // extern "C"
#if !BUILDFLAG(IS_WIN) && \
!(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS (except for PartitionAlloc-Everywhere) where the
// malloc intercept is deep enough that it also catches the cpp calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid crashes
// in case of operator new() noexcept. Thus, operator new() noexcept needs to
// be routed to base::internal::PartitionMallocUnchecked through the shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif
#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif
// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that both
// the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
// this mechanism.
#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif
#if BUILDFLAG(IS_APPLE)
namespace base {
namespace allocator {
void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Prepares the default dispatch. After the intercepted malloc calls have
// traversed the shim this will route them to the default malloc zone.
InitializeDefaultDispatchToMacAllocator();
MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
// This replaces the default malloc zone, causing calls to malloc & friends
// from the codebase to be routed to ShimMalloc() above.
base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
} // namespace allocator
} // namespace base
#endif
// Cross-checks.
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif
#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
(defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif

View File

@ -0,0 +1,200 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#include <stddef.h>
#include <stdint.h>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/types/strong_alias.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_ALLOW_PCSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif
namespace base {
namespace allocator {
// Allocator Shim API. It allows one to:
// - Configure the behavior of the allocator (what to do on OOM failures).
// - Install new hooks (AllocatorDispatch) in the allocator chain.
// When this shim layer is enabled, the route of an allocation is as follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
// The override_* headers define the symbols required to intercept calls to
// malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Routing allocation calls to the shim:
// The headers above route the calls to the internal ShimMalloc(), ShimFree(),
// ShimCppNew() etc. methods defined in allocator_shim.cc.
// These methods will: (1) forward the allocation call to the front of the
// AllocatorDispatch chain. (2) perform security hardening (e.g., might
// call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
// It is a singly linked list where each element is a struct with function
// pointers (|malloc_function|, |free_function|, etc). Normally the chain
// consists of a single AllocatorDispatch element, herein called
// the "default dispatch", which is statically defined at build time and
// ultimately routes the calls to the actual allocator defined by the build
// config (glibc, ...).
//
// It is possible to dynamically insert further AllocatorDispatch stages
// to the front of the chain, for debugging / profiling purposes.
//
// All the functions must be thread safe. The shim does not enforce any
// serialization. This is to route to thread-aware allocators without
// introducing unnecessary perf hits.
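//
// As a purely illustrative sketch (not part of this API; the "Counting*"
// names below are made up for the example), a profiling hook that counts
// allocations and forwards everything else to the next element of the chain
// could look roughly like:
//
//   void* CountingAlloc(const AllocatorDispatch* self,
//                       size_t size,
//                       void* context) {
//     ++g_allocation_count;  // Hypothetical counter.
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   AllocatorDispatch g_counting_dispatch = {
//       &CountingAlloc,  /* alloc_function */
//       ...              /* every other entry forwards to |next| similarly */
//   };
//
//   // Installed early at startup via:
//   base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);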
struct AllocatorDispatch {
using AllocFn = void*(const AllocatorDispatch* self,
size_t size,
void* context);
using AllocUncheckedFn = void*(const AllocatorDispatch* self,
size_t size,
void* context);
using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context);
using AllocAlignedFn = void*(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context);
using ReallocFn = void*(const AllocatorDispatch* self,
void* address,
size_t size,
void* context);
using FreeFn = void(const AllocatorDispatch* self,
void* address,
void* context);
// Returns the allocated size of user data (not including heap overhead).
// Can be larger than the requested size.
using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
void* address,
void* context);
using BatchMallocFn = unsigned(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context);
using BatchFreeFn = void(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context);
using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context);
using AlignedMallocFn = void*(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context);
using AlignedReallocFn = void*(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context);
using AlignedFreeFn = void(const AllocatorDispatch* self,
void* address,
void* context);
AllocFn* const alloc_function;
AllocUncheckedFn* const alloc_unchecked_function;
AllocZeroInitializedFn* const alloc_zero_initialized_function;
AllocAlignedFn* const alloc_aligned_function;
ReallocFn* const realloc_function;
FreeFn* const free_function;
GetSizeEstimateFn* const get_size_estimate_function;
// batch_malloc, batch_free, and free_definite_size are specific to the OSX
// and iOS allocators.
BatchMallocFn* const batch_malloc_function;
BatchFreeFn* const batch_free_function;
FreeDefiniteSizeFn* const free_definite_size_function;
// _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
// Windows allocator.
AlignedMallocFn* const aligned_malloc_function;
AlignedReallocFn* const aligned_realloc_function;
AlignedFreeFn* const aligned_free_function;
const AllocatorDispatch* next;
// |default_dispatch| is statically defined by one (and only one) of the
// allocator_shim_default_dispatch_to_*.cc files, depending on the build
// configuration.
static const AllocatorDispatch default_dispatch;
};
// When true, makes malloc behave like operator new w.r.t. calling the
// new_handler if the allocation fails (see set_new_mode() on Windows).
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
// regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);
// Frees memory allocated with UncheckedAlloc().
BASE_EXPORT void UncheckedFree(void* ptr);
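//
// Illustrative use only (|kReserveSize| is a made-up constant): a caller that
// prefers to degrade gracefully rather than trigger OOM handling might do:
//
//   void* buffer = base::allocator::UncheckedAlloc(kReserveSize);
//   if (!buffer)
//     return false;  // Allocation failed; no new_handler was invoked.
//   ...
//   base::allocator::UncheckedFree(buffer);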
// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t. concurrent invocations of InsertAllocatorDispatch().
// Callers are responsible for inserting a given dispatch at most once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
// removal of arbitrary elements from a singly linked list would require a lock
// in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
// Configures the allocator for the caller's allocation domain. Allocations that
// take place prior to this configuration step will succeed, but will not
// benefit from its one-time mitigations. As such, this function must be called
// as early as possible during startup.
BASE_EXPORT void ConfigurePartitionAlloc();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void InitializeDefaultAllocatorPartitionRoot();
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// On macOS, the allocator shim needs to be turned on during runtime.
BASE_EXPORT void InitializeAllocatorShim();
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition =
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
using AlternateBucketDistribution =
base::StrongAlias<class AlternateBucketDistributionTag, bool>;
// If |thread_cache_on_non_quarantinable_partition| is specified, the
// thread-cache will be enabled on the non-quarantinable partition. The
// thread-cache on the main (malloc) partition will be disabled.
BASE_EXPORT void ConfigurePartitions(
EnableBrp enable_brp,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution);
#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void EnablePCScan(base::internal::PCScan::InitConfig);
#endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_

View File

@ -0,0 +1,122 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "base/allocator/allocator_shim.h"
#include "base/compiler_specific.h"
#include "base/numerics/checked_math.h"
#include "base/process/memory.h"
#include <dlfcn.h>
#include <malloc.h>
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to libc functions.
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
extern "C" {
void* __libc_malloc(size_t size);
void* __libc_calloc(size_t n, size_t size);
void* __libc_realloc(void* address, size_t size);
void* __libc_memalign(size_t alignment, size_t size);
void __libc_free(void* ptr);
} // extern "C"
namespace {
using base::allocator::AllocatorDispatch;
// Strictly speaking, it would make more sense to not subtract anything, but
// other shims limit to something lower than INT_MAX (which is 0x7FFFFFFF on
// most platforms), and tests expect that.
constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
// We cannot force glibc's malloc() to crash when a large size is requested,
// so do it in the shim instead.
if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_malloc(size);
}
void* GlibcUncheckedMalloc(const AllocatorDispatch*,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
return nullptr;
return __libc_malloc(size);
}
void* GlibcCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
const auto total = base::CheckMul(n, size);
if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size * n);
return __libc_calloc(n, size);
}
void* GlibcRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_realloc(address, size);
}
void* GlibcMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_memalign(alignment, size);
}
void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
__libc_free(address);
}
NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
// glibc does not expose an alias to resolve malloc_usable_size. Dynamically
// resolve it instead. This should be safe because glibc (and hence dlfcn)
// does not use malloc_usable_size internally, so there should not be a risk
// of recursion.
using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
static MallocUsableSizeFunction fn_ptr =
reinterpret_cast<MallocUsableSizeFunction>(
dlsym(RTLD_NEXT, "malloc_usable_size"));
return fn_ptr(address);
}
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&GlibcMalloc, /* alloc_function */
&GlibcUncheckedMalloc, /* alloc_unchecked_function */
&GlibcCalloc, /* alloc_zero_initialized_function */
&GlibcMemalign, /* alloc_aligned_function */
&GlibcRealloc, /* realloc_function */
&GlibcFree, /* free_function */
&GlibcGetSizeEstimate, /* get_size_estimate_function */
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};

View File

@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <malloc.h>
#include "base/allocator/allocator_shim.h"
#include "build/build_config.h"
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
// -Wl,-wrap,malloc approach (see README.md).
// The __real_X functions here are special symbols that the linker will relocate
// against the real "X" undefined symbol, so that __real_malloc becomes the
// equivalent of what an undefined malloc symbol reference would have been.
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
// which routes the __wrap_X functions into the shim.
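//
// Rough illustration of the mechanism (the build flags below are a sketch,
// not taken from this repository): linking with
//
//   ldflags = [ "-Wl,-wrap,malloc", "-Wl,-wrap,free" ]
//
// rewrites every reference to malloc() into __wrap_malloc(), while
// __real_malloc() resolves to the original libc implementation. The
// wrapped-symbols override header mentioned above can then be thought of as
// conceptually providing
//
//   void* __wrap_malloc(size_t size) { return ShimMalloc(size, nullptr); }
//
// and this file closes the loop by routing the tail of the dispatch chain
// back to __real_malloc() and friends.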
extern "C" {
void* __real_malloc(size_t);
void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
void __real_free(void*);
} // extern "C"
namespace {
using base::allocator::AllocatorDispatch;
void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
return __real_malloc(size);
}
void* RealCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
return __real_calloc(n, size);
}
void* RealRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
return __real_realloc(address, size);
}
void* RealMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
return __real_memalign(alignment, size);
}
void RealFree(const AllocatorDispatch*, void* address, void* context) {
__real_free(address);
}
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&RealMalloc, /* alloc_function */
&RealMalloc, /* alloc_unchecked_function */
&RealCalloc, /* alloc_zero_initialized_function */
&RealMemalign, /* alloc_aligned_function */
&RealRealloc, /* realloc_function */
&RealFree, /* free_function */
nullptr, /* get_size_estimate_function */
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};

View File

@ -0,0 +1,107 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <utility>
#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/malloc_zone_functions_mac.h"
namespace base {
namespace allocator {
namespace {
void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
size);
}
void* CallocImpl(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
size);
}
void* MemalignImpl(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
alignment, size);
}
void* ReallocImpl(const AllocatorDispatch*,
void* ptr,
size_t size,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
ptr, size);
}
void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}
size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}
unsigned BatchMallocImpl(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
return functions.batch_malloc(
reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
num_requested);
}
void BatchFreeImpl(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
to_be_freed, num_to_be_freed);
}
void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context) {
MallocZoneFunctions& functions = GetFunctionsForZone(context);
functions.free_definite_size(
reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
}
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&MallocImpl, /* alloc_function */
&MallocImpl, /* alloc_unchecked_function */
&CallocImpl, /* alloc_zero_initialized_function */
&MemalignImpl, /* alloc_aligned_function */
&ReallocImpl, /* realloc_function */
&FreeImpl, /* free_function */
&GetSizeEstimateImpl, /* get_size_estimate_function */
&BatchMallocImpl, /* batch_malloc_function */
&BatchFreeImpl, /* batch_free_function */
&FreeDefiniteSizeImpl, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,774 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include <atomic>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>
#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#include <windows.h>
#endif
using base::allocator::AllocatorDispatch;
namespace {
class SimpleScopedSpinLocker {
public:
explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
// Lock. Semantically equivalent to base::Lock::Acquire().
bool expected = false;
// Weak CAS since we are in a retry loop, relaxed ordering for failure since
// in this case we don't imply any ordering.
//
// This matches partition_allocator/spinning_mutex.h fast path on Linux.
while (!lock_.compare_exchange_weak(
expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
expected = false;
}
}
~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }
private:
std::atomic<bool>& lock_;
};
// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
// prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
// static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
public:
constexpr LeakySingleton() = default;
ALWAYS_INLINE T* Get() {
auto* instance = instance_.load(std::memory_order_acquire);
if (LIKELY(instance))
return instance;
return GetSlowPath();
}
// Replaces the instance pointer with a new one.
void Replace(T* new_instance) {
SimpleScopedSpinLocker scoped_lock{initialization_lock_};
// Modify under the lock to avoid race between |if (instance)| and
// |instance_.store()| in GetSlowPath().
instance_.store(new_instance, std::memory_order_release);
}
private:
T* GetSlowPath();
std::atomic<T*> instance_;
alignas(T) uint8_t instance_buffer_[sizeof(T)];
std::atomic<bool> initialization_lock_;
};
template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
// The instance has not been set, the proper way to proceed (correct
// double-checked locking) is:
//
//   auto* instance = instance_.load(std::memory_order_acquire);
//   if (!instance) {
//     ScopedLock initialization_lock;
//     instance = instance_.load(std::memory_order_relaxed);
//     if (instance)
//       return instance;
//     instance = Create new instance;
//     instance_.store(instance, std::memory_order_release);
//     return instance;
//   }
//
// However, we don't want to use a base::Lock here, so instead we use
// compare-and-exchange on a lock variable, which provides the same
// guarantees.
SimpleScopedSpinLocker scoped_lock{initialization_lock_};
T* instance = instance_.load(std::memory_order_relaxed);
// Someone beat us.
if (instance)
return instance;
instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
instance_.store(instance, std::memory_order_release);
return instance;
}
class MainPartitionConstructor {
public:
static base::ThreadSafePartitionRoot* New(void* buffer) {
constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Additional partitions may be created in ConfigurePartitions(). Since
// only one partition can have thread cache enabled, postpone the
// decision to turn the thread cache on until after that call.
// TODO(bartekn): Enable it here by default, once the "split-only" mode
// is no longer needed.
base::PartitionOptions::ThreadCache::kDisabled;
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Other tests, such as the ThreadCache tests create a thread cache,
// and only one is supported at a time.
base::PartitionOptions::ThreadCache::kDisabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* new_root = new (buffer) base::ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed,
thread_cache,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
return new_root;
}
};
LeakySingleton<base::ThreadSafePartitionRoot, MainPartitionConstructor> g_root
CONSTINIT = {};
base::ThreadSafePartitionRoot* Allocator() {
return g_root.Get();
}
// Original g_root if it was replaced by ConfigurePartitions().
std::atomic<base::ThreadSafePartitionRoot*> g_original_root(nullptr);
class AlignedPartitionConstructor {
public:
static base::ThreadSafePartitionRoot* New(void* buffer) {
return g_root.Get();
}
};
LeakySingleton<base::ThreadSafePartitionRoot, AlignedPartitionConstructor>
g_aligned_root CONSTINIT = {};
base::ThreadSafePartitionRoot* OriginalAllocator() {
return g_original_root.load(std::memory_order_relaxed);
}
base::ThreadSafePartitionRoot* AlignedAllocator() {
return g_aligned_root.Get();
}
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool IsRunning32bitEmulatedOnArm64() {
using IsWow64Process2Function = decltype(&IsWow64Process2);
IsWow64Process2Function is_wow64_process2 =
reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
if (!is_wow64_process2)
return false;
USHORT process_machine;
USHORT native_machine;
bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
&native_machine);
if (!retval)
return false;
if (native_machine == IMAGE_FILE_MACHINE_ARM64)
return true;
return false;
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// The number of bytes to add to every allocation. Ordinarily zero, but set to 8
// when emulating an x86 on ARM64 to avoid a bug in the Windows x86 emulator.
size_t g_extra_bytes;
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
return size;
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
}
void* AllocateAlignedMemory(size_t alignment, size_t size) {
// Memory returned by the regular allocator *always* respects |kAlignment|,
// which is a power of two, and any valid alignment is also a power of two. So
// we can directly fulfill these requests with the main allocator.
//
// This has several advantages:
// - The thread cache is supported on the main partition
// - Reduced fragmentation
// - Better coverage for MiraclePtr variants requiring extras
//
// There are several call sites in Chromium where base::AlignedAlloc is called
// with a small alignment. Some may be due to overly-careful code, some are
// because the client code doesn't know the required alignment at compile
// time.
//
// Note that all "AlignedFree()" variants (_aligned_free() on Windows for
// instance) directly call PartitionFree(), so there is no risk of
// mismatch (see the default_dispatch definition below).
if (alignment <= base::kAlignment) {
// This is mandated by |posix_memalign()| and friends, so should never fire.
PA_CHECK(base::bits::IsPowerOfTwo(alignment));
// TODO(bartekn): See if the compiler optimizes branches down the stack on
// Mac, where PartitionPageSize() isn't constexpr.
return Allocator()->AllocWithFlagsNoHooks(0, size,
base::PartitionPageSize());
}
return AlignedAllocator()->AlignedAllocWithFlags(
partition_alloc::AllocFlags::kNoHooks, alignment, size);
}
} // namespace
namespace base {
namespace internal {
namespace {
#if BUILDFLAG(IS_APPLE)
int g_alloc_flags = 0;
#else
constexpr int g_alloc_flags = 0;
#endif
} // namespace
void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
#if BUILDFLAG(IS_APPLE)
// We generally prefer to always crash rather than returning nullptr for
// OOM. However, on some macOS releases, we have to locally allow it due to
// weirdness in OS code. See https://crbug.com/654695 for details.
//
// This is Apple-only since it's not needed elsewhere, and there is a
// performance penalty.
if (value)
g_alloc_flags = 0;
else
g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
#endif
}
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks(
0 | g_alloc_flags, MaybeAdjustSize(size), PartitionPageSize());
}
void* PartitionMallocUnchecked(const AllocatorDispatch*,
size_t size,
void* context) {
ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
MaybeAdjustSize(size), PartitionPageSize());
}
void* PartitionCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
ScopedDisallowAllocations guard{};
const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
PartitionPageSize());
}
void* PartitionMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
ScopedDisallowAllocations guard{};
return AllocateAlignedMemory(alignment, size);
}
void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
size_t size,
size_t alignment,
void* context) {
ScopedDisallowAllocations guard{};
return AllocateAlignedMemory(alignment, size);
}
// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
// and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
void* address,
size_t size,
size_t alignment,
void* context) {
ScopedDisallowAllocations guard{};
void* new_ptr = nullptr;
if (size > 0) {
size = MaybeAdjustSize(size);
new_ptr = AllocateAlignedMemory(alignment, size);
} else {
// size == 0 and address != null means just "free(address)".
if (address)
base::ThreadSafePartitionRoot::FreeNoHooks(address);
}
// The original memory block (specified by address) is unchanged if ENOMEM.
if (!new_ptr)
return nullptr;
// TODO(tasak): Need to compare the new alignment with the address' alignment.
// If the two alignments are not the same, need to return nullptr with EINVAL.
if (address) {
size_t usage = base::ThreadSafePartitionRoot::GetUsableSize(address);
size_t copy_size = usage > size ? size : usage;
memcpy(new_ptr, address, copy_size);
base::ThreadSafePartitionRoot::FreeNoHooks(address);
}
return new_ptr;
}
void* PartitionRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
if (UNLIKELY(!base::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `realloc` which supports zone-
// dispatching so that it appropriately selects the right zone.
return realloc(address, size);
}
#endif // BUILDFLAG(IS_APPLE)
return Allocator()->ReallocWithFlags(
partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address,
MaybeAdjustSize(size), "");
}
#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(IS_CHROMECAST)
extern "C" {
void __real_free(void*);
} // extern "C"
#endif
void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
// TODO(bartekn): Add MTE unmasking here (and below).
if (UNLIKELY(!base::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free` which supports zone-
// dispatching so that it appropriately selects the right zone.
return free(object);
}
#endif // BUILDFLAG(IS_APPLE)
// On Chromecast, there is at least one case where a system malloc() pointer
// can be passed to PartitionAlloc's free(). If we don't own the pointer, pass
// it along. This should not have a runtime cost vs regular Android, since on
// Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(IS_CHROMECAST)
if (UNLIKELY(!base::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free()`, which is `__real_free()`
// here.
return __real_free(object);
}
#endif
base::ThreadSafePartitionRoot::FreeNoHooks(object);
}
#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
ScopedDisallowAllocations guard{};
// TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
// still useful though, as we avoid double-checking that the address is owned.
base::ThreadSafePartitionRoot::FreeNoHooks(address);
}
#endif // BUILDFLAG(IS_APPLE)
size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
// This is used to implement malloc_usable_size(3). Per its man page, "if ptr
// is NULL, 0 is returned".
if (!address)
return 0;
#if BUILDFLAG(IS_APPLE)
if (!base::IsManagedByPartitionAlloc(reinterpret_cast<uintptr_t>(address))) {
// The object pointed to by `address` is not allocated by PartitionAlloc.
// The return value `0` means that the pointer does not belong to this
// malloc zone.
return 0;
}
#endif // BUILDFLAG(IS_APPLE)
// TODO(lizeb): Returns incorrect values for aligned allocations.
const size_t size = base::ThreadSafePartitionRoot::GetUsableSize(address);
#if BUILDFLAG(IS_APPLE)
// The object pointed to by `address` is allocated by PartitionAlloc.
// So, this function must not return zero so that the malloc zone dispatcher
// finds the appropriate malloc zone.
PA_DCHECK(size);
#endif // BUILDFLAG(IS_APPLE)
return size;
}
unsigned PartitionBatchMalloc(const AllocatorDispatch*,
size_t size,
void** results,
unsigned num_requested,
void* context) {
// No real batching: we could, for instance, only acquire the lock once;
// keep it simple for now.
for (unsigned i = 0; i < num_requested; i++) {
// No need to check the results, we crash if it fails.
results[i] = PartitionMalloc(nullptr, size, nullptr);
}
// Either all succeeded, or we crashed.
return num_requested;
}
void PartitionBatchFree(const AllocatorDispatch*,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
// No real batching: we could, for instance, acquire the lock only once; keep
// it simple for now.
for (unsigned i = 0; i < num_to_be_freed; i++) {
PartitionFree(nullptr, to_be_freed[i], nullptr);
}
}
// static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
return ::Allocator();
}
// static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
return ::OriginalAllocator();
}
// static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
return ::AlignedAllocator();
}
} // namespace internal
} // namespace base
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
namespace base {
namespace allocator {
void EnablePartitionAllocMemoryReclaimer() {
// Unlike other partitions, Allocator() and AlignedAllocator() do not register
// their PartitionRoots with the memory reclaimer, because doing so may
// allocate memory. Thus, registration with the memory reclaimer has to happen
// some time later, once the main root is fully configured.
// TODO(bartekn): Aligned allocator can use the regular initialization path.
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
Allocator());
auto* original_root = OriginalAllocator();
if (original_root)
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
original_root);
if (AlignedAllocator() != Allocator()) {
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
AlignedAllocator());
}
}
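// Static storage for the replacement partition roots that ConfigurePartitions()
// below constructs with placement new; the resulting roots live for the rest
// of the process lifetime and are never destroyed.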
alignas(base::ThreadSafePartitionRoot) uint8_t
g_allocator_buffer_for_new_main_partition[sizeof(
base::ThreadSafePartitionRoot)];
alignas(base::ThreadSafePartitionRoot) uint8_t
g_allocator_buffer_for_aligned_alloc_partition[sizeof(
base::ThreadSafePartitionRoot)];
void ConfigurePartitions(
EnableBrp enable_brp,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution) {
// BRP cannot be enabled without splitting the main partition. Furthermore, in
// the "before allocation" mode, it can't be enabled without further splitting
// out the aligned partition.
PA_CHECK(!enable_brp || split_main_partition);
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
#endif
// Can't split out the aligned partition without splitting the main one.
PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);
static bool configured = false;
PA_CHECK(!configured);
configured = true;
// Calling Get() is actually important, even if the return values aren't
// used, because it has the side effect of initializing the variables if they
// weren't already initialized.
auto* current_root = g_root.Get();
auto* current_aligned_root = g_aligned_root.Get();
if (!split_main_partition) {
if (!use_alternate_bucket_distribution) {
current_root->SwitchToDenserBucketDistribution();
current_aligned_root->SwitchToDenserBucketDistribution();
}
PA_DCHECK(!enable_brp);
PA_DCHECK(!use_dedicated_aligned_partition);
PA_DCHECK(!current_root->with_thread_cache);
return;
}
auto* new_root =
new (g_allocator_buffer_for_new_main_partition) ThreadSafePartitionRoot({
!use_dedicated_aligned_partition
? base::PartitionOptions::AlignedAlloc::kAllowed
: base::PartitionOptions::AlignedAlloc::kDisallowed,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
enable_brp ? base::PartitionOptions::BackupRefPtr::kEnabled
: base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
base::ThreadSafePartitionRoot* new_aligned_root;
if (use_dedicated_aligned_partition) {
// TODO(bartekn): Use the original root instead of creating a new one. It'd
// result in one fewer partition, but come at the cost of commingling types.
new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
} else {
// The new main root can also support AlignedAlloc.
new_aligned_root = new_root;
}
// Now switch traffic to the new partitions.
g_aligned_root.Replace(new_aligned_root);
g_root.Replace(new_root);
// g_original_root has to be set after g_root, because other code doesn't
// handle both pointing to the same root well.
// TODO(bartekn): Reorder once this is handled. It isn't ideal for one
// partition to be temporarily invisible.
g_original_root = current_root;
// No need for g_original_aligned_root, because in cases where g_aligned_root
// is replaced, it must've been g_original_root.
PA_CHECK(current_aligned_root == g_original_root);
// Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages);
if (!use_alternate_bucket_distribution) {
g_root.Get()->SwitchToDenserBucketDistribution();
g_aligned_root.Get()->SwitchToDenserBucketDistribution();
}
}
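// Illustrative usage sketch, not part of the upstream file. Assuming the
// parameter types above behave like bool-valued strong aliases, a caller that
// enables BRP must also split the main partition and, in the configuration
// checked above, use a dedicated aligned partition:
//   ConfigurePartitions(EnableBrp(true), SplitMainPartition(true),
//                       UseDedicatedAlignedPartition(true),
//                       AlternateBucketDistribution(false));
// The values are hypothetical; real callers derive them from feature flags.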
#if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(base::internal::PCScan::InitConfig config) {
internal::PCScan::Initialize(config);
internal::PCScan::RegisterScannableRoot(Allocator());
if (OriginalAllocator() != nullptr)
internal::PCScan::RegisterScannableRoot(OriginalAllocator());
if (Allocator() != AlignedAllocator())
internal::PCScan::RegisterScannableRoot(AlignedAllocator());
internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup.
void ConfigurePartitionAlloc() {
#if defined(ARCH_CPU_X86)
if (IsRunning32bitEmulatedOnArm64())
g_extra_bytes = 8;
#endif // defined(ARCH_CPU_X86)
}
#endif // BUILDFLAG(IS_WIN)
} // namespace allocator
} // namespace base
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&base::internal::PartitionMalloc, // alloc_function
&base::internal::PartitionMallocUnchecked, // alloc_unchecked_function
&base::internal::PartitionCalloc, // alloc_zero_initialized_function
&base::internal::PartitionMemalign, // alloc_aligned_function
&base::internal::PartitionRealloc, // realloc_function
&base::internal::PartitionFree, // free_function
&base::internal::PartitionGetSizeEstimate, // get_size_estimate_function
&base::internal::PartitionBatchMalloc, // batch_malloc_function
&base::internal::PartitionBatchFree, // batch_free_function
#if BUILDFLAG(IS_APPLE)
// On Apple OSes, free_definite_size() is always called from free(), since
// get_size_estimate() is used to determine whether an allocation belongs to
// the current zone. It makes sense to optimize for it.
&base::internal::PartitionFreeDefiniteSize,
#else
nullptr, // free_definite_size_function
#endif
&base::internal::PartitionAlignedAlloc, // aligned_malloc_function
&base::internal::PartitionAlignedRealloc, // aligned_realloc_function
&base::internal::PartitionFree, // aligned_free_function
nullptr, // next
};
// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.
extern "C" {
#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
return 0;
}
#endif // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
base::SimplePartitionStatsDumper allocator_dumper;
Allocator()->DumpStats("malloc", true, &allocator_dumper);
// TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
base::SimplePartitionStatsDumper aligned_allocator_dumper;
if (AlignedAllocator() != Allocator()) {
AlignedAllocator()->DumpStats("posix_memalign", true,
&aligned_allocator_dumper);
}
// Dump stats for nonscannable and nonquarantinable allocators.
auto& nonscannable_allocator =
base::internal::NonScannableAllocator::Instance();
base::SimplePartitionStatsDumper nonscannable_allocator_dumper;
if (auto* nonscannable_root = nonscannable_allocator.root())
nonscannable_root->DumpStats("malloc", true,
&nonscannable_allocator_dumper);
auto& nonquarantinable_allocator =
base::internal::NonQuarantinableAllocator::Instance();
base::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
nonquarantinable_root->DumpStats("malloc", true,
&nonquarantinable_allocator_dumper);
struct mallinfo info = {0};
info.arena = 0; // Memory *not* allocated with mmap().
// Memory allocated with mmap(), aka virtual size.
info.hblks = allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes;
// Resident bytes.
info.hblkhd = allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes;
// Allocated bytes.
info.uordblks = allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes;
return info;
}
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
} // extern "C"
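#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// Illustrative sketch, not part of the upstream file: how a caller could read
// the fields populated by the shimmed mallinfo() above. The helper name is
// hypothetical and nothing calls it.
[[maybe_unused]] static size_t ExampleMallinfoAllocatedBytes() {
  struct mallinfo info = mallinfo();
  // As documented above: hblks holds mmapped (virtual) bytes, hblkhd resident
  // bytes, and uordblks allocated bytes.
  return static_cast<size_t>(info.uordblks);
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)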
#if BUILDFLAG(IS_APPLE)
namespace base {
namespace allocator {
void InitializeDefaultAllocatorPartitionRoot() {
// On OS_APPLE, the initialization of PartitionRoot uses memory allocations
// internally, e.g. __builtin_available, and it's not easy to avoid them.
// Thus, we initialize the PartitionRoot using the system default allocator
// before we intercept the system default allocator.
std::ignore = Allocator();
}
} // namespace allocator
} // namespace base
#endif // BUILDFLAG(IS_APPLE)
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@ -0,0 +1,75 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#include "base/allocator/allocator_shim.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/base_export.h"
namespace base {
namespace internal {
void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);
class BASE_EXPORT PartitionAllocMalloc {
public:
static ThreadSafePartitionRoot* Allocator();
// May return |nullptr|, will never return the same pointer as |Allocator()|.
static ThreadSafePartitionRoot* OriginalAllocator();
// May return the same pointer as |Allocator()|.
static ThreadSafePartitionRoot* AlignedAllocator();
};
BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
size_t size,
void* context);
BASE_EXPORT void* PartitionMallocUnchecked(
const base::allocator::AllocatorDispatch*,
size_t size,
void* context);
BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
size_t n,
size_t size,
void* context);
BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
size_t alignment,
size_t size,
void* context);
BASE_EXPORT void* PartitionAlignedAlloc(
const base::allocator::AllocatorDispatch* dispatch,
size_t size,
size_t alignment,
void* context);
BASE_EXPORT void* PartitionAlignedRealloc(
const base::allocator::AllocatorDispatch* dispatch,
void* address,
size_t size,
size_t alignment,
void* context);
BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
void* address,
size_t size,
void* context);
BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
void* object,
void* context);
BASE_EXPORT size_t
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
void* address,
void* context);
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_

View File

@ -0,0 +1,106 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include <ostream>
#include "base/allocator/winheap_stubs_win.h"
#include "base/check.h"
namespace {
using base::allocator::AllocatorDispatch;
void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
size_t size,
void* context) {
return base::allocator::WinHeapMalloc(size);
}
void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
size_t n,
size_t elem_size,
void* context) {
// Overflow check.
const size_t size = n * elem_size;
if (elem_size != 0 && size / elem_size != n)
return nullptr;
void* result = DefaultWinHeapMallocImpl(self, size, context);
if (result) {
memset(result, 0, size);
}
return result;
}
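// Worked example of the overflow check above (illustrative): with a 32-bit
// size_t, n = 0x10000 and elem_size = 0x10001 wrap around to size = 0x10000,
// so size / elem_size == 0 != n and the function returns nullptr instead of
// handing back an undersized, zeroed block.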
void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
CHECK(false) << "The windows heap does not support memalign.";
return nullptr;
}
void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
return base::allocator::WinHeapRealloc(address, size);
}
void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
void* address,
void* context) {
base::allocator::WinHeapFree(address);
}
size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
void* address,
void* context) {
return base::allocator::WinHeapGetSizeEstimate(address);
}
void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
size_t size,
size_t alignment,
void* context) {
return base::allocator::WinHeapAlignedMalloc(size, alignment);
}
void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
void* ptr,
size_t size,
size_t alignment,
void* context) {
return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
}
void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
void* ptr,
void* context) {
base::allocator::WinHeapAlignedFree(ptr);
}
} // namespace
// Guarantee that default_dispatch is compile-time initialized to avoid using
// it before initialization (allocations before main in release builds with
// optimizations disabled).
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
&DefaultWinHeapMallocImpl,
&DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
&DefaultWinHeapCallocImpl,
&DefaultWinHeapMemalignImpl,
&DefaultWinHeapReallocImpl,
&DefaultWinHeapFreeImpl,
&DefaultWinHeapGetSizeEstimateImpl,
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
&DefaultWinHeapAlignedMallocImpl,
&DefaultWinHeapAlignedReallocImpl,
&DefaultWinHeapAlignedFreeImpl,
nullptr, /* next */
};

View File

@ -0,0 +1,53 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#include "build/build_config.h"
#if defined(__GNUC__)
#if BUILDFLAG(IS_POSIX)
#include <sys/cdefs.h> // for __THROW
#endif
#ifndef __THROW // Not a glibc system
#ifdef _NOEXCEPT // LLVM libc++ uses noexcept instead
#define __THROW _NOEXCEPT
#else
#define __THROW
#endif // !_NOEXCEPT
#endif
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
//
// If an exported symbol is linked into a DSO, it may be preempted by a
// definition in the main executable. If this happens to an allocator symbol, it
// will mean that the DSO will use the main executable's allocator. This is
// normally relatively harmless -- regular allocations should all use the same
// allocator, but if the DSO tries to hook the allocator it will not see any
// allocations.
//
// However, if LLVM LTO is enabled, the compiler may inline the shim layer
// symbols into callers. The end result is that allocator calls in DSOs may use
// either the main executable's allocator or the DSO's allocator, depending on
// whether the call was inlined. This is arguably a bug in LLVM caused by its
// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
// To work around the bug we use noinline to prevent the symbols from being
// inlined.
//
// In the long run we probably want to avoid linking the allocator bits into
// DSOs altogether. This will save a little space and stop giving DSOs the false
// impression that they can hook the allocator.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
#elif BUILDFLAG(IS_WIN) // __GNUC__
#define __THROW
#define SHIM_ALWAYS_EXPORT __declspec(noinline)
#endif // __GNUC__
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_

View File

@ -0,0 +1,166 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
// Preempt the default new/delete C++ symbols so they call the shim entry
// points. This file is strongly inspired by tcmalloc's
// libc_override_redefine.h.
#include <new>
#include "base/allocator/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
// std::align_val_t isn't available until C++17, but we want to override the
// aligned new/delete operators anyway, to cover the case where a library that
// uses them gets loaded in. We want to avoid a situation where separate heaps
// are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if BUILDFLAG(IS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif
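// Illustrative note, not part of the upstream file: the mangled spellings
// above follow the Itanium C++ ABI, i.e. on non-Windows builds that lack
// __cpp_aligned_new they define the aligned operators directly by mangled
// name. For example:
//   _ZnwmSt11align_val_t   ==  operator new(unsigned long, std::align_val_t)
//   _ZdlPvSt11align_val_t  ==  operator delete(void*, std::align_val_t)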
#if !BUILDFLAG(IS_APPLE)
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
#else
// On Apple OSes, prefer not to export these symbols (as this reverts to the
// default behavior, they are still exported in e.g. component builds). This is
// partly due to intentional limits on exported symbols in the main library,
// but exporting them is also needless, since no library used on macOS imports
// these.
//
// TODO(lizeb): It may not be necessary to export these anywhere.
#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
#endif
SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
return ShimCppNew(size);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
return ShimCppNew(size);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
const std::nothrow_t&) __THROW {
return ShimCppNewNoThrow(size);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
const std::nothrow_t&) __THROW {
return ShimCppNewNoThrow(size);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW(std::size_t size,
ALIGN_VAL_T alignment) {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_NOTHROW(
std::size_t size,
ALIGN_VAL_T alignment,
const std::nothrow_t&) __THROW {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL(void* p,
ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR(
std::size_t size,
ALIGN_VAL_T alignment) {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
std::size_t size,
ALIGN_VAL_T alignment,
const std::nothrow_t&) __THROW {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL_ARR(void* p,
ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}

View File

@ -0,0 +1,119 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
// Alias the internal Glibc symbols to the shim entry points.
// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
// Effectively this file does two things:
// 1) Re-define the __malloc_hook & co symbols. Those symbols are defined as
// weak in glibc and are meant to be defined strongly by client processes
// to hook calls initiated from within glibc.
// 2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
// is that in the past (in RedHat 9) we had instances of libraries that were
// allocating via malloc() and freeing using __libc_free().
// See tcmalloc's libc_override_glibc.h for more context.
#include <features.h> // for __GLIBC__
#include <malloc.h>
#include <unistd.h>
#include <new>
#include "base/allocator/allocator_shim_internals.h"
// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
#if !defined(__MALLOC_HOOK_VOLATILE)
#define MALLOC_HOOK_MAYBE_VOLATILE /**/
#else
#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
#endif
extern "C" {
// 1) Re-define malloc_hook weak symbols.
namespace {
void* GlibcMallocHook(size_t size, const void* caller) {
return ShimMalloc(size, nullptr);
}
void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
return ShimRealloc(ptr, size, nullptr);
}
void GlibcFreeHook(void* ptr, const void* caller) {
return ShimFree(ptr, nullptr);
}
void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
return ShimMemalign(align, size, nullptr);
}
} // namespace
__attribute__((visibility("default"))) void* (
*MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
const void*) = &GlibcMallocHook;
__attribute__((visibility("default"))) void* (
*MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
&GlibcReallocHook;
__attribute__((visibility("default"))) void (
*MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
const void*) = &GlibcFreeHook;
__attribute__((visibility("default"))) void* (
*MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
&GlibcMemalignHook;
// 2) Redefine libc symbols themselves.
SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
return ShimMalloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
ShimFree(ptr, nullptr);
}
SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
return ShimRealloc(ptr, size, nullptr);
}
SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
return ShimCalloc(n, size, nullptr);
}
SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
return ShimFree(ptr, nullptr);
}
SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
return ShimMemalign(align, s, nullptr);
}
SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
return ShimValloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
return ShimPvalloc(size);
}
SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
return ShimPosixMemalign(r, a, s);
}
} // extern "C"
// Safety check.
#if !defined(__GLIBC__)
#error The target platform does not seem to use Glibc. Disable the allocator \
shim by setting use_allocator_shim=false in GN args.
#endif

View File

@ -0,0 +1,88 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This header's purpose is to preempt the libc symbols for malloc/new so that
// they call the shim layer entry points.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
#include "base/allocator/allocator_shim_internals.h"
extern "C" {
// WARNING: Whenever a new function is added there (which, surprisingly enough,
// happens. For instance glibc 2.33 introduced mallinfo2(), which we don't
// support... yet?), it MUST be added to build/linux/chrome.map.
//
// Otherwise the new symbol is not exported from Chromium's main binary, which
// is necessary to override libc's weak symbol, which in turn is necessary to
// intercept calls made by dynamic libraries. See crbug.com/1292206 for such
// an example.
SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
return ShimMalloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
ShimFree(ptr, nullptr);
}
SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
return ShimRealloc(ptr, size, nullptr);
}
SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
return ShimCalloc(n, size, nullptr);
}
SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
ShimFree(ptr, nullptr);
}
SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
return ShimMemalign(align, s, nullptr);
}
SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
return ShimMemalign(align, s, nullptr);
}
SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
return ShimValloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
return ShimPvalloc(size);
}
SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
return ShimPosixMemalign(r, a, s);
}
SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
return ShimGetSizeEstimate(address, nullptr);
}
SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
return ShimGetSizeEstimate(address, nullptr);
}
// The default dispatch translation unit also has to define the following
// symbols (unless they are ultimately routed to the system symbols):
// void malloc_stats(void);
// int mallopt(int, int);
// struct mallinfo mallinfo(void);
} // extern "C"

View File

@ -0,0 +1,147 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
// This header overrides the __wrap_X symbols when using the link-time
// -Wl,-wrap,malloc shim-layer approach (see README.md).
// All references to malloc, free, etc. within the linker unit that gets the
// -wrap linker flags (e.g., libchrome.so) will be rewritten by the
// linker as references to __wrap_malloc, __wrap_free, which are defined here.
#include <algorithm>
#include <cstring>
#include "base/allocator/allocator_shim_internals.h"
extern "C" {
SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
return ShimCalloc(n, size, nullptr);
}
SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
ShimFree(ptr, nullptr);
}
SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
return ShimMalloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
return ShimMemalign(align, size, nullptr);
}
SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
size_t align,
size_t size) {
return ShimPosixMemalign(res, align, size);
}
SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
return ShimPvalloc(size);
}
SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
return ShimRealloc(address, size, nullptr);
}
SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
return ShimValloc(size, nullptr);
}
const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");
extern char* __wrap_strdup(const char* str);
// Override <stdlib.h>
extern char* __real_realpath(const char* path, char* resolved_path);
SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
char* resolved_path) {
if (resolved_path)
return __real_realpath(path, resolved_path);
char buffer[kPathMaxSize];
if (!__real_realpath(path, buffer))
return nullptr;
return __wrap_strdup(buffer);
}
// Override <string.h> functions
SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
std::size_t length = std::strlen(str) + 1;
void* buffer = ShimMalloc(length, nullptr);
if (!buffer)
return nullptr;
return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
}
SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
std::size_t length = std::min(std::strlen(str), n);
char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
if (!buffer)
return nullptr;
std::memcpy(buffer, str, length);
buffer[length] = '\0';
return buffer;
}
// Override <unistd.h>
extern char* __real_getcwd(char* buffer, size_t size);
SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
if (buffer)
return __real_getcwd(buffer, size);
if (!size)
size = kPathMaxSize;
char local_buffer[size];
if (!__real_getcwd(local_buffer, size))
return nullptr;
return __wrap_strdup(local_buffer);
}
// Override stdio.h
// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
// Android, and used by libc++.
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
const char* fmt,
va_list va_args) {
constexpr int kInitialSize = 128;
*strp = static_cast<char*>(
malloc(kInitialSize)); // Our malloc() doesn't return nullptr.
int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
*strp = static_cast<char*>(realloc(*strp, actual_size + 1));
// Now we know the size. This is not very efficient, but we cannot really do
// better without accessing internal libc functions, or reimplementing
// *printf().
//
// This is very lightly used in Chromium in practice, see crbug.com/116558 for
// details.
if (actual_size >= kInitialSize)
return vsnprintf(*strp, actual_size + 1, fmt, va_args);
return actual_size;
}
SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
va_list va_args;
va_start(va_args, fmt);
int retval = vasprintf(strp, fmt, va_args);
va_end(va_args);
return retval;
}
} // extern "C"

View File

@ -0,0 +1,376 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
#endif
#include <string.h>
#include <tuple>
#include "base/allocator/early_zone_registration_mac.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/logging.h"
namespace base {
// Defined in base/allocator/partition_allocator/partition_root.cc
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();
namespace allocator {
namespace {
// malloc_introspection_t's callback functions for our own zone
kern_return_t MallocIntrospectionEnumerator(task_t task,
void*,
unsigned type_mask,
vm_address_t zone_address,
memory_reader_t reader,
vm_range_recorder_t recorder) {
// Should enumerate all memory regions allocated by this allocator, but this
// is not implemented because there is no use case for it yet.
return KERN_FAILURE;
}
size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
return base::bits::AlignUp(size, base::kAlignment);
}
boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
// Should check the consistency of the allocator implementing this malloc
// zone, but this is not implemented because there is no use case for it yet.
return true;
}
void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
// Should print the current state of the zone for debugging / investigation
// purposes, but this is not implemented because there is no use case for it
// yet.
}
void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
// Should enable logging of the activity on the given `address`, but this is
// not implemented because there is no use case for it yet.
}
void MallocIntrospectionForceLock(malloc_zone_t* zone) {
// Called before fork(2) to acquire the lock.
PartitionAllocMallocHookOnBeforeForkInParent();
}
void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
// Called in the parent process after fork(2) to release the lock.
PartitionAllocMallocHookOnAfterForkInParent();
}
void MallocIntrospectionStatistics(malloc_zone_t* zone,
malloc_statistics_t* stats) {
// Should report the memory usage correctly, but this is not implemented
// because there is no use case for it yet.
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0; // High water mark of touched memory
stats->size_allocated = 0; // Reserved in memory
}
boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
// Should return true if the underlying PartitionRoot is locked, but this is
// not implemented because this function does not seem to be effectively used.
return false;
}
boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
// 'discharge' is not supported.
return false;
}
void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
// 'discharge' is not supported.
}
void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
// 'discharge' is not supported.
}
void MallocIntrospectionEnumerateDischargedPointers(
malloc_zone_t* zone,
void (^report_discharged)(void* memory, void* info)) {
// 'discharge' is not supported.
}
void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
// Called in a child process after fork(2) to re-initialize the lock.
PartitionAllocMallocHookOnAfterForkInChild();
}
void MallocIntrospectionPrintTask(task_t task,
unsigned level,
vm_address_t zone_address,
memory_reader_t reader,
print_task_printer_t printer) {
// Should print the current state of another process's zone for debugging /
// investigation purposes, but this is not implemented because there is no use
// case for it yet.
}
void MallocIntrospectionTaskStatistics(task_t task,
vm_address_t zone_address,
memory_reader_t reader,
malloc_statistics_t* stats) {
// Should report the memory usage of another process's zone, but this is not
// implemented because there is no use case for it yet.
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0; // High water mark of touched memory
stats->size_allocated = 0; // Reserved in memory
}
// malloc_zone_t's callback functions for our own zone
size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
return ShimGetSizeEstimate(ptr, nullptr);
}
void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
return ShimMalloc(size, nullptr);
}
void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
return ShimCalloc(n, size, nullptr);
}
void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
return ShimValloc(size, nullptr);
}
void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
return ShimFree(ptr, nullptr);
}
void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
return ShimRealloc(ptr, size, nullptr);
}
void MallocZoneDestroy(malloc_zone_t* zone) {
// No support to destroy the zone for now.
}
void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
return ShimMemalign(alignment, size, nullptr);
}
void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
return ShimFreeDefiniteSize(ptr, size, nullptr);
}
unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
size_t size,
void** results,
unsigned num_requested) {
return ShimBatchMalloc(size, results, num_requested, nullptr);
}
void MallocZoneBatchFree(malloc_zone_t* zone,
void** to_be_freed,
unsigned num) {
return ShimBatchFree(to_be_freed, num, nullptr);
}
malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};
malloc_zone_t* GetDefaultMallocZone() {
// malloc_default_zone() does not return... the default zone, but the initial
// one. The default one is the first element of the default zone array.
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
return reinterpret_cast<malloc_zone_t*>(zones[0]);
}
bool IsAlreadyRegistered() {
// HACK: This should really only be called once, but it is not.
//
// This function is a static constructor of its binary. If it is included in a
// dynamic library, then the same process may end up executing this code
// multiple times, once per library. As a consequence, each new library will
// add its own allocator as the default zone. Aside from splitting the heap
// further, the main issue arises if/when the last library to be loaded
// (dlopen()-ed) gets dlclose()-ed.
//
// See crbug.com/1271139 for details.
//
// In this case, subsequent free() will be routed by libmalloc to the deleted
// zone (since its code has been unloaded from memory), and crash inside
// libsystem's free(). This in practice happens as soon as dlclose() is
// called, inside the dynamic linker (dyld).
//
// Since we are talking about different libraries, and about issues inside the
// dynamic linker, we cannot use a global static variable (which would be
// per-library), or anything from pthread.
//
// The solution used here is to check whether the current default zone is
// already ours, in which case we are not the first dynamic library here, and
// should do nothing. This is racy, and hacky.
vm_address_t* zones = nullptr;
unsigned int zone_count = 0;
// *Not* using malloc_default_zone(), as it seems to be hardcoded to return
// something other than the default zone. See the difference between
// malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
// (in libmalloc).
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
// Checking all the zones, in case someone registered their own zone on top of
// our own.
for (unsigned int i = 0; i < zone_count; i++) {
malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
// strcmp() and not a pointer comparison: since the zone was registered from
// another library, the pointers don't match.
if (zone->zone_name &&
(strcmp(zone->zone_name, partition_alloc::kPartitionAllocZoneName) ==
0)) {
// This zone is provided by PartitionAlloc, so this function has been
// called from another library (or the main executable), nothing to do.
//
// This should be a crash, ideally, but callers do it, so only warn, for
// now.
RAW_LOG(ERROR,
"Trying to load the allocator multiple times. This is *not* "
"supported.");
return true;
}
}
return false;
}
void InitializeZone() {
g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
g_mac_malloc_introspection.check = MallocIntrospectionCheck;
g_mac_malloc_introspection.print = MallocIntrospectionPrint;
g_mac_malloc_introspection.log = MallocIntrospectionLog;
g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
g_mac_malloc_introspection.enable_discharge_checking =
MallocIntrospectionEnableDischargeChecking;
g_mac_malloc_introspection.disable_discharge_checking =
MallocIntrospectionDisableDischargeChecking;
g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
g_mac_malloc_introspection.enumerate_discharged_pointers =
MallocIntrospectionEnumerateDischargedPointers;
g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
g_mac_malloc_introspection.task_statistics =
MallocIntrospectionTaskStatistics;
// `version` member indicates which APIs are supported in this zone.
// version >= 5: memalign is supported
// version >= 6: free_definite_size is supported
// version >= 7: introspect's discharge family is supported
// version >= 8: pressure_relief is supported
// version >= 9: introspect.reinit_lock is supported
// version >= 10: claimed_address is supported
// version >= 11: introspect.print_task is supported
// version >= 12: introspect.task_statistics is supported
g_mac_malloc_zone.version = partition_alloc::kZoneVersion;
g_mac_malloc_zone.zone_name = partition_alloc::kPartitionAllocZoneName;
g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
g_mac_malloc_zone.size = MallocZoneSize;
g_mac_malloc_zone.malloc = MallocZoneMalloc;
g_mac_malloc_zone.calloc = MallocZoneCalloc;
g_mac_malloc_zone.valloc = MallocZoneValloc;
g_mac_malloc_zone.free = MallocZoneFree;
g_mac_malloc_zone.realloc = MallocZoneRealloc;
g_mac_malloc_zone.destroy = MallocZoneDestroy;
g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
g_mac_malloc_zone.memalign = MallocZoneMemalign;
g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
g_mac_malloc_zone.pressure_relief = nullptr;
g_mac_malloc_zone.claimed_address = nullptr;
}
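// Illustrative note, not part of the upstream file: the zone set up above
// fills in memalign and free_definite_size, so per the version table above
// kZoneVersion must be at least 6. Assuming kZoneVersion is a compile-time
// constant, a hypothetical check could read:
//   static_assert(partition_alloc::kZoneVersion >= 6,
//                 "zone advertises memalign/free_definite_size");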
// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority in the initialization order,
// [NSThread init] unfortunately runs before
// InitializeDefaultMallocZoneWithPartitionAlloc and allocates memory with the
// system allocator. Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher. Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
if (IsAlreadyRegistered())
return;
// Instantiate the existing regular and purgeable zones in order to make the
// existing purgeable zone use the existing regular zone since PartitionAlloc
// doesn't support a purgeable zone.
std::ignore = malloc_default_zone();
std::ignore = malloc_default_purgeable_zone();
// Initialize the default allocator's PartitionRoot with the existing zone.
InitializeDefaultAllocatorPartitionRoot();
// Create our own malloc zone.
InitializeZone();
malloc_zone_t* system_default_zone = GetDefaultMallocZone();
if (strcmp(system_default_zone->zone_name,
partition_alloc::kDelegatingZoneName) == 0) {
// The first zone is our zone; we can unregister it, replacing it with the
// new one. This relies on a precise zone setup, done in
// |EarlyMallocZoneRegistration()|.
malloc_zone_register(&g_mac_malloc_zone);
malloc_zone_unregister(system_default_zone);
return;
}
// Not in the path where the zone was registered early. This is either racy,
// or fine if the current process is not hosting multiple threads.
//
// This path is fine for e.g. most unit tests.
//
// Make our own zone the default zone.
//
// Put our own zone at the last position, so that it promotes to the default
// zone. The implementation logic of malloc_zone_unregister is:
// zone_table.swap(unregistered_zone, last_zone);
// zone_table.shrink_size_by_1();
malloc_zone_register(&g_mac_malloc_zone);
malloc_zone_unregister(system_default_zone);
// Between malloc_zone_unregister(system_default_zone) (above) and
// malloc_zone_register(system_default_zone) (below), i.e. while
// system_default_zone is absent, it's possible that another thread calls
// free(ptr), hits a "no zone found" error, and crashes the process.
malloc_zone_register(system_default_zone);
// Confirm that our own zone is now the default zone.
CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
}
} // namespace
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,60 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
MallocZoneFunctions new_functions;
memset(&new_functions, 0, sizeof(MallocZoneFunctions));
new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
return ShimGetSizeEstimate(ptr, zone);
};
new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
return ShimMalloc(size, zone);
};
new_functions.calloc = [](malloc_zone_t* zone, size_t n,
size_t size) -> void* {
return ShimCalloc(n, size, zone);
};
new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
return ShimValloc(size, zone);
};
new_functions.free = [](malloc_zone_t* zone, void* ptr) {
ShimFree(ptr, zone);
};
new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
size_t size) -> void* {
return ShimRealloc(ptr, size, zone);
};
new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
void** results,
unsigned num_requested) -> unsigned {
return ShimBatchMalloc(size, results, num_requested, zone);
};
new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
unsigned num_to_be_freed) -> void {
ShimBatchFree(to_be_freed, num_to_be_freed, zone);
};
new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
size_t size) -> void* {
return ShimMemalign(alignment, size, zone);
};
new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
size_t size) {
ShimFreeDefiniteSize(ptr, size, zone);
};
return new_functions;
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,178 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This header defines symbols to override the same functions in the Visual C++
// CRT implementation.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#include <malloc.h>
#include <windows.h>
#include "base/allocator/allocator_shim_internals.h"
// Even though most C++ allocation operators can be left alone since the
// interception works at a lower level, these ones should be
// overridden. Otherwise they redirect to malloc(), which is configured to crash
// with an OOM in failure cases, such as allocation requests that are too large.
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
const std::nothrow_t&) noexcept {
return ShimCppNewNoThrow(size);
}
SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
const std::nothrow_t&) noexcept {
return ShimCppNewNoThrow(size);
}
extern "C" {
void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
namespace {
int win_new_mode = 0;
} // namespace
// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
//
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
int _set_new_mode(int flag) {
// The MS CRT calls this function early on in startup, so this serves as a low
// overhead proof that the allocator shim is in place for this process.
base::allocator::g_is_win_shim_layer_initialized = true;
int old_mode = win_new_mode;
win_new_mode = flag;
base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
return old_mode;
}
// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
int _query_new_mode() {
return win_new_mode;
}
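// Illustrative usage sketch, not part of the upstream file, following the
// semantics described above:
//   int old_mode = _set_new_mode(1);  // malloc() failures now invoke the
//                                     // new handler instead of returning
//                                     // nullptr.
//   _query_new_mode();                // Reports the currently active mode.
//   _set_new_mode(old_mode);          // Restore the previous behavior.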
// These symbols override the CRT's implementation of the same functions.
__declspec(restrict) void* malloc(size_t size) {
return ShimMalloc(size, nullptr);
}
void free(void* ptr) {
ShimFree(ptr, nullptr);
}
__declspec(restrict) void* realloc(void* ptr, size_t size) {
return ShimRealloc(ptr, size, nullptr);
}
__declspec(restrict) void* calloc(size_t n, size_t size) {
return ShimCalloc(n, size, nullptr);
}
// _msize() is the Windows equivalent of malloc_size().
size_t _msize(void* memblock) {
return ShimGetSizeEstimate(memblock, nullptr);
}
__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
return ShimAlignedMalloc(size, alignment, nullptr);
}
__declspec(restrict) void* _aligned_realloc(void* address,
size_t size,
size_t alignment) {
return ShimAlignedRealloc(address, size, alignment, nullptr);
}
void _aligned_free(void* address) {
ShimAlignedFree(address, nullptr);
}
// _recalloc_base is called by CRT internally.
__declspec(restrict) void* _recalloc_base(void* block,
size_t count,
size_t size) {
const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
base::CheckedNumeric<size_t> new_block_size_checked = count;
new_block_size_checked *= size;
const size_t new_block_size = new_block_size_checked.ValueOrDie();
void* const new_block = realloc(block, new_block_size);
if (new_block != nullptr && old_block_size < new_block_size) {
memset(static_cast<char*>(new_block) + old_block_size, 0,
new_block_size - old_block_size);
}
return new_block;
}
__declspec(restrict) void* _malloc_base(size_t size) {
return malloc(size);
}
__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
return calloc(n, size);
}
void _free_base(void* block) {
free(block);
}
__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
return _recalloc_base(block, count, size);
}
// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added if
// uses are introduced.
__declspec(restrict) void* _aligned_recalloc(void* address,
size_t num,
size_t size,
size_t alignment) {
CHECK(false) << "This routine has not been implemented";
__builtin_unreachable();
}
size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
CHECK(false) << "This routine has not been implemented";
__builtin_unreachable();
}
__declspec(restrict) void* _aligned_offset_malloc(size_t size,
size_t alignment,
size_t offset) {
CHECK(false) << "This routine has not been implemented";
__builtin_unreachable();
}
__declspec(restrict) void* _aligned_offset_realloc(void* address,
size_t size,
size_t alignment,
size_t offset) {
CHECK(false) << "This routine has not been implemented";
__builtin_unreachable();
}
__declspec(restrict) void* _aligned_offset_recalloc(void* address,
size_t num,
size_t size,
size_t alignment,
size_t offset) {
CHECK(false) << "This routine has not been implemented";
__builtin_unreachable();
}
} // extern "C"
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_

View File

@ -0,0 +1,256 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/early_zone_registration_mac.h"
#include <mach/mach.h>
#include <malloc/malloc.h>
#include "base/allocator/buildflags.h"
// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif
namespace partition_alloc {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}
#else
extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}
namespace {
malloc_zone_t* GetDefaultMallocZone() {
// malloc_default_zone() does not return... the default zone, but the
// initial one. The default one is the first element of the default zone
// array.
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
if (result != KERN_SUCCESS)
abort_report_np("Cannot enumerate malloc() zones");
return reinterpret_cast<malloc_zone_t*>(zones[0]);
}
} // namespace
void EarlyMallocZoneRegistration() {
// Must have static storage duration, as raw pointers are passed to
// libsystem_malloc.
static malloc_zone_t g_delegating_zone;
static malloc_introspection_t g_delegating_zone_introspect;
static malloc_zone_t* g_default_zone;
// Make sure that the default zone is instantiated.
malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
g_default_zone = GetDefaultMallocZone();
// The delegating zone:
// - Forwards all allocations to the existing default zone
// - Does *not* claim to own any memory, meaning that it will always be
// skipped in free() in libsystem_malloc.dylib.
//
// This is a temporary zone, until it gets replaced by PartitionAlloc, inside
// the main library. Since the main library depends on many external
// libraries, we cannot install PartitionAlloc as the default zone without
// concurrency issues.
//
// Instead, what we do here, while the process is single-threaded, is:
// - Register the delegating zone as the default one.
// - Set the original (libsystem_malloc's) one as the second zone
//
// Later, when PartitionAlloc initializes, we replace the default (delegating)
// zone with ours. The end state is:
// 1. PartitionAlloc zone
// 2. libsystem_malloc zone
// Set up of the delegating zone. Note that it doesn't just forward calls to
// the default zone. This is because the system zone's malloc_zone_t pointer
// actually points to a larger struct, containing allocator metadata. So if we
// pass as the first parameter the "simple" delegating zone pointer, then we
// immediately crash inside the system zone functions. So we need to replace
// the zone pointer as well.
//
// Calls fall into 4 categories:
// - Allocation calls: forwarded to the real system zone
// - "Is this pointer yours" calls: always answer no
// - free(): Should never be called, but is in practice, see comments below.
// - Diagnostics and debugging: these are typically called for every
// zone. They are no-ops for us, as we don't want to double-count, or lock
// the data structures of the real zone twice.
// Allocation: Forward to the real zone.
g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
return g_default_zone->malloc(g_default_zone, size);
};
g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
size_t size) {
return g_default_zone->calloc(g_default_zone, num_items, size);
};
g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
return g_default_zone->valloc(g_default_zone, size);
};
g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
return g_default_zone->realloc(g_default_zone, ptr, size);
};
g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
void** results, unsigned num_requested) {
return g_default_zone->batch_malloc(g_default_zone, size, results,
num_requested);
};
g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
size_t size) {
return g_default_zone->memalign(g_default_zone, alignment, size);
};
// Does ptr belong to this zone? Return value is != 0 if so.
g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
return 0;
};
// Free functions.
// The normal path for freeing memory is:
// 1. Try all zones in order, call zone->size(ptr)
// 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
// 3. If no zone matches, crash.
//
// Since this zone always returns 0 in size() (see above), zone->free()
// should never be called. Unfortunately, this is not the case, as some places
// in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
// crashing, forward the call. It's the caller's responsibility to use the
// same zone for free() as for the allocation (this is in the contract of
// malloc_zone_free()).
//
// However, note that the sequence of calls size() -> free() is not possible
// for this zone, as size() always returns 0.
g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
return g_default_zone->free(g_default_zone, ptr);
};
g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
size_t size) {
return g_default_zone->free_definite_size(g_default_zone, ptr, size);
};
g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
unsigned num_to_be_freed) {
return g_default_zone->batch_free(g_default_zone, to_be_freed,
num_to_be_freed);
};
// Diagnostics and debugging.
//
// Do nothing to reduce memory footprint; the real zone will do it.
g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
size_t goal) -> size_t { return 0; };
// Introspection calls are not all optional; for instance, locking and
// unlocking before/after fork() are not.
//
// Nothing to enumerate.
g_delegating_zone_introspect.enumerator =
[](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
memory_reader_t reader,
vm_range_recorder_t recorder) -> kern_return_t {
return KERN_SUCCESS;
};
// Need to provide a real implementation; it is used, e.g., for array sizing.
g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
size_t size) {
return g_default_zone->introspect->good_size(g_default_zone, size);
};
// Nothing to do.
g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
return true;
};
g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
boolean_t verbose) {};
g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
// Do not forward the lock / unlock calls. Since the default zone is still
// there, we should not lock here, as it would lock the zone twice (all
// zones are locked before fork()). Rather, do nothing, since this fake
// zone does not need any locking.
g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
// No stats.
g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
malloc_statistics_t* stats) {};
// We are not locked.
g_delegating_zone_introspect.zone_locked =
[](malloc_zone_t* zone) -> boolean_t { return false; };
// Don't support discharge checking.
g_delegating_zone_introspect.enable_discharge_checking =
[](malloc_zone_t* zone) -> boolean_t { return false; };
g_delegating_zone_introspect.disable_discharge_checking =
[](malloc_zone_t* zone) {};
g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
void* memory) {};
// Could use something lower to support fewer functions, but this is
// consistent with the real zone installed by PartitionAlloc.
g_delegating_zone.version = kZoneVersion;
g_delegating_zone.introspect = &g_delegating_zone_introspect;
// This name is used in PartitionAlloc's initialization to determine whether
// it should replace the delegating zone.
g_delegating_zone.zone_name = kDelegatingZoneName;
// Register puts the new zone at the end, unregister swaps the new zone with
// the last one.
// The zone array is, after these lines, in order:
// 1. |g_default_zone|...|g_delegating_zone|
// 2. |g_delegating_zone|...| (no more default)
// 3. |g_delegating_zone|...|g_default_zone|
malloc_zone_register(&g_delegating_zone);
malloc_zone_unregister(g_default_zone);
malloc_zone_register(g_default_zone);
// Make sure that the purgeable zone is after the default one.
// This will make g_default_zone take the purgeable zone's spot.
malloc_zone_unregister(purgeable_zone);
// Add back the purgeable zone as the last one.
malloc_zone_register(purgeable_zone);
// Final configuration:
// |g_delegating_zone|...|g_default_zone|purgeable_zone|
// Sanity check.
if (GetDefaultMallocZone() != &g_delegating_zone)
abort_report_np("Failed to install the delegating zone as default.");
}
void AllowDoublePartitionAllocZoneRegistration() {
unsigned int zone_count = 0;
vm_address_t* zones = nullptr;
kern_return_t result =
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
if (result != KERN_SUCCESS)
abort_report_np("Cannot enumerate malloc() zones");
// If PartitionAlloc is one of the zones, *change* its name so that
// registration can happen multiple times. This works because zone
// registration only keeps a pointer to the struct; it does not copy the data.
for (unsigned int i = 0; i < zone_count; i++) {
malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
if (zone->zone_name &&
strcmp(zone->zone_name, kPartitionAllocZoneName) == 0) {
zone->zone_name = "RenamedPartitionAlloc";
break;
}
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace partition_alloc

View File

@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.
namespace partition_alloc {
static constexpr char kDelegatingZoneName[] =
"DelegatingDefaultZoneForPartitionAlloc";
static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";
// Zone version. Determines which callbacks are set in the various malloc_zone_t
// structs.
constexpr int kZoneVersion = 9;
// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();
// Tricks the registration code to believe that PartitionAlloc was not already
// registered. This allows a future library load to register PartitionAlloc's
// zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we still
// end up with a split heap, and more memory usage.
//
// This is a hack for crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_

View File

@ -0,0 +1,119 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/malloc_zone_functions_mac.h"
#include <atomic>
#include "base/synchronization/lock.h"
namespace base {
namespace allocator {
MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
"MallocZoneFunctions must be POD");
void StoreZoneFunctions(const ChromeMallocZone* zone,
MallocZoneFunctions* functions) {
memset(functions, 0, sizeof(MallocZoneFunctions));
functions->malloc = zone->malloc;
functions->calloc = zone->calloc;
functions->valloc = zone->valloc;
functions->free = zone->free;
functions->realloc = zone->realloc;
functions->size = zone->size;
CHECK(functions->malloc && functions->calloc && functions->valloc &&
functions->free && functions->realloc && functions->size);
// These functions might be nullptr.
functions->batch_malloc = zone->batch_malloc;
functions->batch_free = zone->batch_free;
if (zone->version >= 5) {
// Not all custom malloc zones have a memalign.
functions->memalign = zone->memalign;
}
if (zone->version >= 6) {
// This may be nullptr.
functions->free_definite_size = zone->free_definite_size;
}
// Note that zone version 8 introduced a pressure relief callback, and version
// 10 introduced a claimed address callback, but neither is an allocation or
// deallocation callback, so neither is important to intercept.
functions->context = zone;
}
namespace {
// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
base::Lock& GetLock() {
static base::Lock* g_lock = new base::Lock;
return *g_lock;
}
void EnsureMallocZonesInitializedLocked() {
GetLock().AssertAcquired();
}
int g_zone_count = 0;
bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
EnsureMallocZonesInitializedLocked();
GetLock().AssertAcquired();
for (int i = 0; i < g_zone_count; ++i) {
if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
return true;
}
return false;
}
} // namespace
bool StoreMallocZone(ChromeMallocZone* zone) {
base::AutoLock l(GetLock());
EnsureMallocZonesInitializedLocked();
if (IsMallocZoneAlreadyStoredLocked(zone))
return false;
if (g_zone_count == kMaxZoneCount)
return false;
StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
++g_zone_count;
// No other thread can possibly see these stores at this point. The code that
// reads these values is triggered after this function returns, so we want to
// guarantee that they are committed at this stage.
std::atomic_thread_fence(std::memory_order_seq_cst);
return true;
}
bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
base::AutoLock l(GetLock());
return IsMallocZoneAlreadyStoredLocked(zone);
}
bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
const MallocZoneFunctions* functions) {
return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}
int GetMallocZoneCountForTesting() {
base::AutoLock l(GetLock());
return g_zone_count;
}
void ClearAllMallocZonesForTesting() {
base::AutoLock l(GetLock());
EnsureMallocZonesInitializedLocked();
memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
g_zone_count = 0;
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,103 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#include <malloc/malloc.h>
#include <stddef.h>
#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
size_t num_items,
size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
void* ptr,
size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
size_t alignment,
size_t size);
typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
size_t size,
void** results,
unsigned num_requested);
typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
void** to_be_freed,
unsigned num_to_be_freed);
typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
void* ptr,
size_t size);
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
struct MallocZoneFunctions {
malloc_type malloc;
calloc_type calloc;
valloc_type valloc;
free_type free;
realloc_type realloc;
memalign_type memalign;
batch_malloc_type batch_malloc;
batch_free_type batch_free;
free_definite_size_type free_definite_size;
size_fn_type size;
const ChromeMallocZone* context;
};
BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch back
// into the zone, and additional zones may be added later in the execution of
// the program, so the array needs to be both thread-safe and high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
// 1. Fill in all the fields.
// 2. Update the total zone count.
// 3. Insert a memory barrier.
// 4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
ChromeMallocZone* zone,
const MallocZoneFunctions* functions);
BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();
inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
if (g_malloc_zones[i].context == zone)
return g_malloc_zones[i];
}
IMMEDIATE_CRASH();
}
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_

View File

@ -0,0 +1,115 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_alloc_features.h"
#include "base/feature_list.h"
#include "build/build_config.h"
namespace base {
namespace features {
#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
"PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
// If enabled, PCScan is turned on only for the renderer's malloc partition.
const Feature kPartitionAllocPCScanRendererOnly{
"PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};
// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
const Feature kPartitionAllocBackupRefPtrControl{
"PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};
// Use a larger maximum thread cache cacheable bucket size.
const Feature kPartitionAllocLargeThreadCacheSize{
"PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
// Not unconditionally enabled on 32 bit Android, since it is a more
// memory-constrained platform.
FEATURE_DISABLED_BY_DEFAULT
#else
FEATURE_ENABLED_BY_DEFAULT
#endif
};
const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
"PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
const Feature kPartitionAllocBackupRefPtr{"PartitionAllocBackupRefPtr",
FEATURE_DISABLED_BY_DEFAULT};
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = {
{BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
"browser-and-renderer"},
{BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
{BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam{
&kPartitionAllocBackupRefPtr, "enabled-processes",
BackupRefPtrEnabledProcesses::kBrowserOnly,
&kBackupRefPtrEnabledProcessesOptions};
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way,
"disabled-but-3-way-split"},
};
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
&kBackupRefPtrModeOptions};
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, switches the bucket distribution to an alternate one. The
// alternate distribution must have buckets that are a subset of the default
// one.
const Feature kPartitionAllocUseAlternateDistribution{
"PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
const Feature kPartitionAllocPCScanMUAwareScheduler{
"PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};
// If enabled, PCScan unconditionally frees all quarantined objects.
// This is a performance testing feature.
const Feature kPartitionAllocPCScanImmediateFreeing{
"PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};
// If enabled, PCScan clears eagerly (synchronously) on free().
const Feature kPartitionAllocPCScanEagerClearing{
"PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};
// In addition to the heap, also scan the stack of the current mutator.
const Feature kPartitionAllocPCScanStackScanning {
"PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED)
};
const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT};
} // namespace features
} // namespace base

View File

@ -0,0 +1,77 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/metrics/field_trial_params.h"
namespace base {
struct Feature;
namespace features {
#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocPCScanRendererOnly;
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
extern const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing;
enum class BackupRefPtrEnabledProcesses {
// BRP enabled only in the browser process.
kBrowserOnly,
// BRP enabled only in the browser and renderer processes.
kBrowserAndRenderer,
// BRP enabled in all processes, except renderer.
kNonRenderer,
// BRP enabled in all processes.
kAllProcesses,
};
enum class BackupRefPtrMode {
// BRP is disabled across all partitions. Equivalent to the Finch flag being
// disabled.
kDisabled,
// BRP is enabled in the main partition, as well as certain Renderer-only
// partitions (if enabled in Renderer at all).
// This entails splitting the main partition.
kEnabled,
// BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode.
kDisabledButSplitPartitions2Way,
// BRP is disabled, but the main partition *and* aligned partition are split
// out, as if BRP was enabled in the "before allocation" mode.
kDisabledButSplitPartitions3Way,
};
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtr;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
kBackupRefPtrModeParam;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
extern const BASE_EXPORT Feature kPartitionAllocDCScan;
extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;
extern const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution;
} // namespace features
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View File

@ -0,0 +1,438 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_alloc_support.h"
#include <array>
#include <cstdint>
#include <map>
#include <string>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
#include "base/debug/stack_trace.h"
#include "base/feature_list.h"
#include "base/immediate_crash.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/strings/stringprintf.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "base/trace_event/base_tracing.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace base {
namespace allocator {
namespace {
#if defined(PA_ALLOW_PCSCAN)
constexpr const char* ScannerIdToTracingString(
internal::StatsCollector::ScannerId id) {
switch (id) {
case internal::StatsCollector::ScannerId::kClear:
return "PCScan.Scanner.Clear";
case internal::StatsCollector::ScannerId::kScan:
return "PCScan.Scanner.Scan";
case internal::StatsCollector::ScannerId::kSweep:
return "PCScan.Scanner.Sweep";
case internal::StatsCollector::ScannerId::kOverall:
return "PCScan.Scanner";
case internal::StatsCollector::ScannerId::kNumIds:
__builtin_unreachable();
}
}
constexpr const char* MutatorIdToTracingString(
internal::StatsCollector::MutatorId id) {
switch (id) {
case internal::StatsCollector::MutatorId::kClear:
return "PCScan.Mutator.Clear";
case internal::StatsCollector::MutatorId::kScanStack:
return "PCScan.Mutator.ScanStack";
case internal::StatsCollector::MutatorId::kScan:
return "PCScan.Mutator.Scan";
case internal::StatsCollector::MutatorId::kOverall:
return "PCScan.Mutator";
case internal::StatsCollector::MutatorId::kNumIds:
__builtin_unreachable();
}
}
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public StatsReporter {
public:
void ReportTraceEvent(internal::StatsCollector::ScannerId id,
[[maybe_unused]] const PlatformThreadId tid,
TimeTicks start_time,
TimeTicks end_time) override {
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = ScannerIdToTracingString(id);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
}
void ReportTraceEvent(internal::StatsCollector::MutatorId id,
[[maybe_unused]] const PlatformThreadId tid,
TimeTicks start_time,
TimeTicks end_time) override {
// TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time.
const char* tracing_id = MutatorIdToTracingString(id);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time);
}
void ReportSurvivedQuarantineSize(size_t survived_size) override {
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
survived_size);
}
void ReportSurvivedQuarantinePercent(double survived_rate) override {
// Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
// divide back.
// TODO(bikineev): Remove after switching to perfetto.
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
1000 * survived_rate);
}
void ReportStats(const char* stats_name, TimeDelta sample) override {
UmaHistogramTimes(stats_name, sample);
}
private:
static constexpr char kTraceCategory[] = "partition_alloc";
};
#endif // defined(PA_ALLOW_PCSCAN)
} // namespace
#if defined(PA_ALLOW_PCSCAN)
void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter;
static bool registered = false;
DCHECK(!registered);
internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // defined(PA_ALLOW_PCSCAN)
namespace {
void RunThreadCachePeriodicPurge() {
TRACE_EVENT0("memory", "PeriodicPurge");
auto& instance = internal::ThreadCacheRegistry::Instance();
instance.RunPeriodicPurge();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}
void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
{
// Micros, since memory reclaiming should typically take at most a few ms.
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
instance->ReclaimNormal();
}
TimeDelta delay =
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
task_runner->PostDelayedTask(
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}
} // namespace
void StartThreadCachePeriodicPurge() {
auto& instance = internal::ThreadCacheRegistry::Instance();
TimeDelta delay =
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}
void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
// Can be called several times.
static bool is_memory_reclaimer_running = false;
if (is_memory_reclaimer_running)
return;
is_memory_reclaimer_running = true;
// The caller of the API fully controls where the reclaim runs.
// However there are a few reasons to recommend that the caller runs
// it on the main thread:
// - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
// is more likely in cache when executing on the main thread.
// - Memory reclaim takes the partition lock for each partition. As a
// consequence, while reclaim is running, the main thread is unlikely to be
// able to make progress, as it would be waiting on the lock.
// - Finally, this runs in idle time only, so there should be no visible
// impact.
//
// From local testing, time to reclaim is 100us-1ms, and reclaiming every few
// seconds is useful. Since this is meant to run during idle time only, it is
// a reasonable starting point balancing effectiveness vs cost. See
// crbug.com/942512 for details and experimental results.
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
TimeDelta delay =
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
task_runner->PostDelayedTask(
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}
std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
std::map<std::string, std::string> trials;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// BackupRefPtr_Effective and PCScan_Effective record whether or not
// BackupRefPtr and/or PCScan are enabled. The experiments aren't independent,
// so having a synthetic Finch trial will help us look only at cases where one isn't
// affected by the other.
// Whether PartitionAllocBackupRefPtr is enabled (as determined by
// FeatureList::IsEnabled).
[[maybe_unused]] bool brp_finch_enabled = false;
// Whether PartitionAllocBackupRefPtr is set up for a non-default behavior. The
// default behavior is when either the Finch flag is disabled, or it is enabled
// in brp-mode=disabled (these two options are equivalent).
[[maybe_unused]] bool brp_nondefault_behavior = false;
// Whether PartitionAllocBackupRefPtr is set up to enable BRP protection. It
// requires the Finch flag to be enabled and brp-mode!=disabled*. Some modes,
// e.g. disabled-but-3-way-split, do something (hence can't be considered the
// default behavior), but don't enable BRP protection.
[[maybe_unused]] bool brp_truly_enabled = false;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
if (FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr))
brp_finch_enabled = true;
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() !=
features::BackupRefPtrMode::kDisabled)
brp_nondefault_behavior = true;
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() ==
features::BackupRefPtrMode::kEnabled)
brp_truly_enabled = true;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
[[maybe_unused]] bool pcscan_enabled =
#if defined(PA_ALLOW_PCSCAN)
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else
false;
#endif
std::string brp_group_name = "Unavailable";
#if BUILDFLAG(USE_BACKUP_REF_PTR)
if (pcscan_enabled) {
// If PCScan is enabled, just ignore the population.
brp_group_name = "Ignore_PCScanIsOn";
} else if (!brp_finch_enabled) {
// The control group is actually disguised as "enabled", but in fact it's
// disabled using a param. This is to differentiate the population that
// participates in the control group, from the population that isn't in any
// group.
brp_group_name = "Ignore_NoGroup";
} else {
switch (features::kBackupRefPtrModeParam.Get()) {
case features::BackupRefPtrMode::kDisabled:
brp_group_name = "Disabled";
break;
case features::BackupRefPtrMode::kEnabled:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlot";
#else
brp_group_name = "EnabledBeforeAlloc";
#endif
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
brp_group_name = "DisabledBut2WaySplit";
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
brp_group_name = "DisabledBut3WaySplit";
break;
}
if (features::kBackupRefPtrModeParam.Get() !=
features::BackupRefPtrMode::kDisabled) {
std::string process_selector;
switch (features::kBackupRefPtrEnabledProcessesParam.Get()) {
case features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_selector = "BrowserOnly";
break;
case features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_selector = "BrowserAndRenderer";
break;
case features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_selector = "NonRenderer";
break;
case features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_selector = "AllProcesses";
break;
}
brp_group_name += ("_" + process_selector);
}
}
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
trials.emplace("BackupRefPtr_Effective", brp_group_name);
// On 32-bit architectures, PCScan is not supported and permanently disabled.
// Don't lump it into "Disabled", so that belonging to "Enabled"/"Disabled" is
// fully controlled by Finch and the groups thus have identical population sizes.
std::string pcscan_group_name = "Unavailable";
std::string pcscan_group_name_fallback = "Unavailable";
#if defined(PA_ALLOW_PCSCAN)
if (brp_truly_enabled) {
// If BRP protection is enabled, just ignore the population. Check
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes
// where BRP protection is actually disabled.
pcscan_group_name = "Ignore_BRPIsOn";
} else {
pcscan_group_name = (pcscan_enabled ? "Enabled" : "Disabled");
}
// In case we are incorrect that PCScan is independent of partition-split
// modes, create a fallback trial that only takes into account the BRP Finch
// settings that preserve the default behavior.
if (brp_nondefault_behavior) {
pcscan_group_name_fallback = "Ignore_BRPIsOn";
} else {
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
}
#endif // defined(PA_ALLOW_PCSCAN)
trials.emplace("PCScan_Effective", pcscan_group_name);
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
trials.emplace("FakeBinaryExperiment",
#if BUILDFLAG(USE_FAKE_BINARY_EXPERIMENT)
"Enabled"
#else
"Disabled"
#endif
);
return trials;
}
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
namespace {
internal::PartitionLock g_stack_trace_buffer_lock;
struct StackTraceWithID {
debug::StackTrace stack_trace;
uintptr_t id = 0;
};
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
void DanglingRawPtrDetected(uintptr_t id) {
// This is called from inside the allocator. No allocation is allowed.
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
#if DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer)
PA_DCHECK(!entry || entry->id != id);
#endif // DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (!entry) {
entry = {debug::StackTrace(), id};
return;
}
}
// The StackTrace hasn't been recorded, because the buffer isn't large
// enough.
}
void DanglingRawPtrReleased(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
absl::optional<std::string> stack_trace_free;
std::string stack_trace_release = base::debug::StackTrace().ToString();
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (entry && entry->id == id) {
stack_trace_free = entry->stack_trace.ToString();
entry = absl::nullopt;
break;
}
}
if (stack_trace_free) {
LOG(ERROR) << base::StringPrintf(
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
":\n\n"
"The memory was freed at:\n%s\n"
"The dangling raw_ptr was released at:\n%s",
id, stack_trace_free->c_str(), stack_trace_release.c_str());
} else {
LOG(ERROR) << base::StringPrintf(
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
":\n\n"
"It was not recorded where the memory was freed.\n\n"
"The dangling raw_ptr was released at:\n%s",
id, stack_trace_release.c_str());
}
IMMEDIATE_CRASH();
}
void ClearDanglingRawPtrBuffer() {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
g_stack_trace_buffer = DanglingRawPtrBuffer();
}
} // namespace
void InstallDanglingRawPtrChecks() {
// Clearing storage is useful for running multiple unit tests without
// restarting the test executable.
ClearDanglingRawPtrBuffer();
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleased);
}
// TODO(arthursonzogni): There might exist long-lived dangling raw_ptrs. If there
// is a dangling pointer, we should crash at some point. Consider providing an
// API to periodically check the buffer.
#else // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void InstallDanglingRawPtrChecks() {}
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,42 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#include <map>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/memory/scoped_refptr.h"
#include "base/task/sequenced_task_runner.h"
namespace base {
namespace allocator {
#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();
BASE_EXPORT void StartMemoryReclaimer(
scoped_refptr<SequencedTaskRunner> task_runner);
BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
// Install handlers for when dangling raw_ptr(s) have been detected. This prints
// two StackTraces: one where the memory was freed, and one where the last
// dangling raw_ptr stopped referencing it.
//
// This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks();
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_

View File

@ -0,0 +1,57 @@
# It's planned that PartitionAlloc will be a stand-alone third-party library,
# and dependencies need to be strictly controlled and minimized.
noparent = True
include_rules = [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/buildflags.h",
"+base/base_export.h",
"+base/bind.h",
"+base/bits.h",
"+base/callback.h",
"+base/check.h",
"+base/check_op.h",
"+base/compiler_specific.h",
"+base/cpu.h",
"+base/cxx17_backports.h",
"+base/dcheck_is_on.h",
"+base/debug/alias.h",
"+base/debug/proc_maps_linux.h",
"+base/files/file_path.h",
"+base/fuchsia/fuchsia_logging.h",
"+base/gtest_prod_util.h",
"+base/immediate_crash.h",
"+base/lazy_instance.h",
"+base/location.h",
"+base/logging.h",
"+base/logging_buildflags.h",
"+base/mac/foundation_util.h",
"+base/mac/mac_util.h",
"+base/mac/scoped_cftyperef.h",
"+base/memory/ref_counted.h",
"+base/memory/scoped_refptr.h",
"+base/native_library.h",
"+base/no_destructor.h",
"+base/posix/eintr_wrapper.h",
"+base/process/memory.h",
"+base/rand_util.h",
"+base/strings/stringprintf.h",
"+base/sys_byteorder.h",
"+base/system/sys_info.h",
"+base/test/bind.h",
"+base/test/gtest_util.h",
"+base/thread_annotations.h",
"+base/threading/platform_thread.h",
"+base/time/time.h",
"+base/time/time_override.h",
"+base/timer/lap_timer.h",
"+base/trace_event/base_tracing.h",
"+base/win/windows_types.h",
"+base/win/windows_version.h",
"+build/build_config.h",
"+build/buildflag.h",
"+build/chromecast_buildflags.h",
"+testing/gtest/include/gtest/gtest.h",
"+testing/perf/perf_result_reporter.h",
]

View File

@ -0,0 +1,6 @@
monorail {
component: "Blink>MemoryAllocator>Partition"
}
# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"

View File

@ -0,0 +1,3 @@
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org

View File

@ -0,0 +1,269 @@
# PartitionAlloc Design
This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.
## Overview
PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.
### Core terms
A *partition* is a heap that is separated and protected from any other
partitions, as well as from non-PartitionAlloc memory. The most typical use of
partitions is to isolate certain object types. However, one can also isolate
objects of certain sizes, or objects of a certain lifetime (as the caller
prefers). Callers can create as many partitions as they need. The direct
memory cost of partitions is minimal, but the implicit cost resulting from
fragmentation is not to be underestimated.
Each partition holds multiple buckets. A *bucket* is a collection of regions in
a partition that contains similar-sized objects, e.g. one bucket holds sizes
(224,&nbsp;256], another (256,&nbsp;320], and so on. Bucket sizes are
geometrically spaced, and go all the way up to `kMaxBucketed`, which is a tad
under 1MiB (so-called *normal buckets*). There are tens of buckets, 4 between
each power of two (except for lower sizes where buckets that aren't a multiple
of `base::kAlignment` simply don't exist).
Larger allocations (&gt;`kMaxBucketed`) are realized by direct memory mapping
(*direct map*).
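
As a rough illustration of the geometric bucket spacing described above, here
is a toy model (a sketch only; the real bucket table, its smallest sizes, and
the `base::kAlignment` handling live in `partition_alloc_constants.h` and
differ in detail):

```cpp
#include <cstddef>
#include <cstdio>

// Toy model: 4 geometrically spaced buckets per power of two, i.e. upper
// bucket boundaries of the form 2^k, 1.25*2^k, 1.5*2^k and 1.75*2^k.
size_t ToyBucketSize(size_t requested) {
  size_t base = 16;  // assumed smallest bucket in this sketch
  while (true) {
    for (int step = 0; step < 4; ++step) {
      size_t bucket = base + (base / 4) * step;  // base * (1 + step/4)
      if (requested <= bucket)
        return bucket;
    }
    base *= 2;
  }
}

int main() {
  printf("250 -> %zu\n", ToyBucketSize(250));  // 256: the (224, 256] bucket
  printf("300 -> %zu\n", ToyBucketSize(300));  // 320: the (256, 320] bucket
  return 0;
}
```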
### Performance
PartitionAlloc is designed to be extremely fast in its fast paths. The fast
paths of allocation and deallocation require very few (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.
However, even the fast path isn't the fastest, because it requires taking
a per-partition lock. Although we optimized the lock, there was still room for
improvement. Therefore we introduced the *thread cache*, which holds a small
amount of not-too-large memory chunks, ready to be allocated. Because these
chunks are stored per-thread, they can be allocated without a lock, only
requiring a faster thread-local storage (TLS) lookup, improving cache locality
in the process.
The thread cache has been tailored to satisfy a vast majority of requests by
allocating from and releasing memory to the main allocator in batches,
amortizing lock acquisition and further improving locality while not trapping
excess memory.
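
To make the batching idea concrete, here is a hedged toy sketch of a per-thread
cache for a single size class (illustrative only; the names `ToyThreadCache`,
`kBatchSize` and `g_toy_cache` are invented here, and PartitionAlloc's real
thread cache in `thread_cache.h` differs substantially):

```cpp
#include <cstddef>
#include <cstdlib>
#include <vector>

constexpr size_t kBatchSize = 8;

// Assumes all calls use the same allocation size (one size class).
struct ToyThreadCache {
  std::vector<void*> free_chunks;  // thread-local, so no lock needed

  void* Allocate(size_t size) {
    if (free_chunks.empty()) {
      // Slow path: grab a whole batch from the central allocator at once,
      // amortizing its lock acquisition over kBatchSize allocations.
      for (size_t i = 0; i < kBatchSize; ++i)
        free_chunks.push_back(malloc(size));
    }
    void* chunk = free_chunks.back();
    free_chunks.pop_back();
    return chunk;
  }

  void Free(void* ptr) {
    free_chunks.push_back(ptr);
    if (free_chunks.size() > 2 * kBatchSize) {
      // Flush excess back in a batch so the cache doesn't trap memory.
      for (size_t i = 0; i < kBatchSize; ++i) {
        free(free_chunks.back());
        free_chunks.pop_back();
      }
    }
  }
};

thread_local ToyThreadCache g_toy_cache;  // one cache per thread
```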
### Security
Security is one of the important goals of PartitionAlloc.
PartitionAlloc guarantees that different partitions exist in different regions
of the process's address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.
Similarly, one page can contain only objects from the same bucket.
When freed, PartitionAlloc returns the physical memory, but continues to reserve
the region for this very bucket.
The above techniques help avoid type confusion attacks. Note, however, these
apply only to normal buckets and not to direct map, as it'd waste too much
address space.
PartitionAlloc also guarantees that:
* Linear overflows/underflows cannot corrupt into, out of, or between
partitions. There are guard pages at the beginning and the end of each memory
region owned by a partition.
* Linear overflows/underflows cannot corrupt the allocation metadata.
PartitionAlloc records metadata in a dedicated, out-of-line region (not
adjacent to objects), surrounded by guard pages. (Freelist pointers are an
exception.)
* Partial pointer overwrite of freelist pointer should fault.
* Direct map allocations have guard pages at the beginning and the end.
### Alignment
PartitionAlloc guarantees that returned pointers are aligned on
`base::kAlignment` boundary (typically 16B on 64-bit systems, and 8B on 32-bit).
PartitionAlloc also supports higher levels of alignment, that can be requested
via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
`posix_memalign()`). The requested
alignment has to be a power of two. PartitionAlloc reserves the right to round
up the requested size to the nearest power of two, greater than or equal to the
requested alignment. This may be wasteful, but allows taking advantage of
natural PartitionAlloc alignment guarantees. Allocations with an alignment
requirement greater than `base::kAlignment` are expected to be very rare.
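
A minimal sketch of the size rounding rule just described (this models the
documented guarantee, not PartitionAlloc's internal implementation; the helper
name is invented here):

```cpp
#include <cstddef>

// Effective size an aligned request may be rounded up to: the smallest power
// of two that is >= both the requested size and the requested alignment
// (which itself must be a power of two).
size_t RoundedUpAllocationSize(size_t requested_size, size_t alignment) {
  size_t rounded = 1;
  while (rounded < requested_size || rounded < alignment)
    rounded <<= 1;
  return rounded;
}

// Examples: RoundedUpAllocationSize(40, 64) == 64,
//           RoundedUpAllocationSize(100, 64) == 128.
```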
## PartitionAlloc-Everywhere
Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
It was invoked explicitly, by calling PartitionAlloc APIs directly.
PartitionAlloc-Everywhere is the name of the project that brought PartitionAlloc
to the entire-ish codebase (exclusions apply). This was done by intercepting
`malloc()`, `free()`, `realloc()`, aforementioned `posix_memalign()`, etc. and
routing them into PartitionAlloc. The shim located in
`base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h` is
responsible for intercepting. For more details, see
[base/allocator/README.md](../../../base/allocator/README.md).
A special, catch-it-all *Malloc* partition has been created for the intercepted
`malloc()` et al. This is to isolate from already existing Blink partitions.
The only exception to that is Blink's *FastMalloc* partition, which was also
catch-it-all in nature, so it's perfectly fine to merge these together, to
minimize fragmentation.
PartitionAlloc-Everywhere was launched in M89 for Windows 64-bit and Android.
Windows 32-bit and Linux followed it shortly after, in M90.
## Architecture
### Many Different Flavors of Pages
In PartitionAlloc, by *system page* we mean a memory page as defined by CPU/OS
(often referred to as "virtual page" out there). It is most commonly 4KiB in
size, but depending on CPU it can be larger (PartitionAlloc supports up to
64KiB).
The reason why we use the term "system page" is to disambiguate from
*partition page*, which is the most common granularity used by PartitionAlloc.
Each partition page consists of exactly 4 system pages.
A *super page* is a 2MiB region, aligned on a 2MiB boundary.
Don't confuse it with CPU/OS terms like "large page" or "huge page", which are
also commonly 2MiB in size. These have to be fully committed/uncommitted in
memory, whereas super pages can be partially committed, with system page
granularity.
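
For the common 4KiB-system-page case, the arithmetic works out as follows
(illustrative constants only; the real values are platform-dependent and
defined in `partition_alloc_constants.h`):

```cpp
#include <cstddef>

constexpr size_t kToySystemPageSize = 4 * 1024;                   // 4 KiB
constexpr size_t kToyPartitionPageSize = 4 * kToySystemPageSize;  // 16 KiB
constexpr size_t kToySuperPageSize = 2 * 1024 * 1024;             // 2 MiB
constexpr size_t kToyPartitionPagesPerSuperPage =
    kToySuperPageSize / kToyPartitionPageSize;                    // 128
```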
### Slots and Spans
A *slot* is an indivisible allocation unit. Slot sizes are tied to buckets.
For example, each allocation that falls into the bucket (224,&nbsp;256] would
be satisfied with a slot of size 256. This applies only to normal buckets, not
to direct map.
A *slot span* is just a grouping of slots of the same size next to each other
in memory. Slot span size is a multiple of a partition page.
A bucket is a collection of slot spans containing slots of the same size,
organized as linked-lists.
Allocations up to 4 partition pages are referred to as *small buckets*.
In these cases, slot spans are always between 1 and 4 partition pages in size.
The size is chosen based on the slot size, such that the rounding waste is
minimized. For example, if the slot size was 96B and slot span was 1 partition
page of 16KiB, 64B would be wasted at the end, but nothing is wasted if 3
partition pages totalling 48KiB are used. Furthermore, PartitionAlloc may avoid
waste by lowering the number of committed system pages compared to the number of
reserved pages. For example, for the slot size of 80B we'd use a slot span of 4
partition pages of 16KiB, i.e. 16 system pages of 4KiB, but commit only up to
15, thus resulting in perfect packing.
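
A short sketch of the waste arithmetic behind these examples (the helper is
invented for illustration; it is not the actual span-size selection code):

```cpp
#include <cstddef>
#include <cstdio>

// Bytes left over at the end of a candidate slot span of |partition_pages|
// partition pages (16 KiB each in this sketch) for a given slot size.
size_t WasteFor(size_t slot_size, size_t partition_pages) {
  const size_t span_bytes = partition_pages * 16 * 1024;
  return span_bytes % slot_size;
}

int main() {
  printf("96B slots, 1 page:  %zu bytes wasted\n", WasteFor(96, 1));  // 64
  printf("96B slots, 3 pages: %zu bytes wasted\n", WasteFor(96, 3));  // 0
  return 0;
}
```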
Allocations above 4 partition pages (but &le;`kMaxBucketed`) are referred to as
*single slot spans*. That's because each slot span is guaranteed to hold exactly
one slot. Fun fact: there are sizes &le;4 partition pages that result in a slot
span having exactly 1 slot, but nonetheless they're still classified as small
buckets. The reason is that single slot spans are often handled by a different
code path, and that distinction is made purely based on slot size, for
simplicity and efficiency.
### Layout in Memory
PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
pages. Each super page is split into partition pages.
The first and the last partition page are permanently inaccessible and serve
as guard pages, with the exception of one system page in the middle of the first
partition page that holds metadata (32B struct per partition page).
![anatomy of a super page](./dot/super-page.png)
* The slot span numbers provide a visual hint of their size (in partition
pages).
* Colors provide a visual hint of the bucket to which the slot span belongs.
* Although only five colors are shown, in reality, a super page holds
tens of slot spans, some of which belong to the same bucket.
* The system page that holds metadata tracks each partition page with one 32B
[`PartitionPage` struct][PartitionPage], which is either
* a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
* a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
diagram).
* Gray fill denotes guard pages (one partition page each at the head and tail
of each super page).
As allocation requests arrive, there is eventually a need to allocate a new slot
span.
Address space for such a slot span is carved out from the last super page. If
there is not enough space, a new super page is allocated. Due to varying slot
span sizes, this may lead to leaving space unused (we never go back to fill previous
super pages), which is fine because this memory is merely reserved, which is far
less precious than committed memory. Note also that address space reserved for a
slot span is never released, even if the slot span isn't used for a long time.
All slots in a newly allocated slot span are *free*, i.e. available for
allocation.
### Freelist Pointers
All free slots within a slot span are chained into a singly-linked free-list,
by writing the *next* pointer at the beginning of each slot, and the head of the
list is written in the metadata struct.
However, writing a pointer in each free slot of a newly allocated span would
require committing and faulting in physical pages upfront, which would be
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
Only provisioned slots are chained into the freelist.
Once provisioned slots in a span are depleted, then another page worth of slots
is provisioned (note, a slot that crosses a page boundary only gets
provisioned with slots of the next page). See
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.
Freelist pointers are stored at the beginning of each free slot. As such, they
are the only metadata that is inline, i.e. stored among the
objects. This makes them prone to overruns. On little-endian systems, the
pointers are encoded by reversing byte order, so that partial overruns will very
likely result in destroying the pointer, as opposed to forming a valid pointer
to a nearby location.
Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a
different manner. This helps PartitionAlloc detect corruptions.
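
A minimal sketch of the byte-swapping encoding described above, assuming a
64-bit little-endian build (the helper names are hypothetical; the real code
also maintains the encoded shadow copy next to each entry):

```cpp
#include <cstdint>

// Encode a freelist "next" pointer by reversing its byte order, so that a
// partial overwrite of the stored value almost certainly decodes into a
// wild address rather than a valid pointer to a nearby location.
uint64_t EncodeFreelistPointer(uint64_t ptr) {
  return __builtin_bswap64(ptr);
}

// The transformation is its own inverse.
uint64_t DecodeFreelistPointer(uint64_t encoded) {
  return __builtin_bswap64(encoded);
}
```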
### Slot Span States
A slot span can be in any of 4 states:
* *Full*. A full span has no free slots.
* *Empty*. An empty span has no allocated slots, only free slots.
* *Active*. An active span is anything in between the above two.
* *Decommitted*. A decommitted span is a special case of an empty span, where
all pages are decommitted from memory.
PartitionAlloc prioritizes getting an available slot from an active span, over
an empty one, in the hope that the latter can soon be transitioned into a
decommitted state, thus releasing memory. There is no mechanism, however, to
prioritize selection of a slot span based on the number of already allocated
slots.
An empty span becomes decommitted either when there are too many empty spans
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in
low memory pressure conditions). An allocation can be satisfied from
a decommitted span if there are no active or empty spans available. The slot
provisioning mechanism kicks back in, committing the pages gradually as needed,
and the span becomes active. (There is currently no other way
to unprovision slots than decommitting the entire span).
As mentioned above, a bucket is a collection of slot spans containing slots of
the same size. In fact, each bucket has 3 linked-lists, chaining active, empty
and decommitted spans (see `PartitionBucket::*_slot_spans_head`).
There is no need for a full span list. The lists are updated lazily. An empty,
decommitted or full span may stay on the active list for some time, until
`PartitionBucket::SetNewActiveSlotSpan()` encounters it.
A decommitted span may stay on the empty list for some time,
until `PartitionBucket<thread_safe>::SlowPathAlloc()` encounters it. However,
the inaccuracy can't happen in the other direction, i.e. an active span can only
be on the active list, and an empty span can only be on the active or empty
list.
[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4

View File

@ -0,0 +1,552 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_space_stats.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/lazy_instance.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include <sys/mman.h>
#endif
namespace partition_alloc::internal {
namespace {
base::LazyInstance<AddressPoolManager>::Leaky g_address_pool_manager =
LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
AddressPoolManager* AddressPoolManager::GetInstance() {
return g_address_pool_manager.Pointer();
}
#if defined(PA_HAS_64_BITS_POINTERS)
namespace {
// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
// Callers rely on the pages being zero-initialized when recommitting them.
// |DecommitSystemPages| doesn't guarantee this on all operating systems, in
// particular on macOS, but |DecommitAndZeroSystemPages| does.
DecommitAndZeroSystemPages(address, size);
}
} // namespace
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
PA_DCHECK(!(ptr & kSuperPageOffsetMask));
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
for (pool_handle i = 0; i < std::size(pools_); ++i) {
if (!pools_[i].IsInitialized()) {
pools_[i].Initialize(ptr, length);
return i + 1;
}
}
PA_NOTREACHED();
return 0;
}
void AddressPoolManager::GetPoolUsedSuperPages(
pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used) {
Pool* pool = GetPool(handle);
if (!pool)
return;
pool->GetUsedSuperPages(used);
}
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
Pool* pool = GetPool(handle);
if (!pool)
return 0;
return pool->GetBaseAddress();
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < std::size(pools_); ++i)
pools_[i].Reset();
}
void AddressPoolManager::Remove(pool_handle handle) {
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
pool->Reset();
}
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
Pool* pool = GetPool(handle);
if (!requested_address)
return pool->FindChunk(length);
const bool is_available = pool->TryReserveChunk(requested_address, length);
if (is_available)
return requested_address;
return pool->FindChunk(length);
}
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length) {
PA_DCHECK(0 < handle && handle <= kNumPools);
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
DecommitPages(address, length);
pool->FreeChunk(address, length);
}
void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
PA_CHECK(ptr != 0);
PA_CHECK(!(ptr & kSuperPageOffsetMask));
PA_CHECK(!(length & kSuperPageOffsetMask));
address_begin_ = ptr;
#if DCHECK_IS_ON()
address_end_ = ptr + length;
PA_DCHECK(address_begin_ < address_end_);
#endif
total_bits_ = length / kSuperPageSize;
PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);
ScopedGuard scoped_lock(lock_);
alloc_bitset_.reset();
bit_hint_ = 0;
}
bool AddressPoolManager::Pool::IsInitialized() {
return address_begin_ != 0;
}
void AddressPoolManager::Pool::Reset() {
address_begin_ = 0;
}
void AddressPoolManager::Pool::GetUsedSuperPages(
std::bitset<kMaxSuperPagesInPool>& used) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(IsInitialized());
used = alloc_bitset_;
}
uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
PA_DCHECK(IsInitialized());
return address_begin_;
}
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
const size_t need_bits = requested_size >> kSuperPageShift;
// Use first-fit policy to find an available chunk from free chunks. Start
// from |bit_hint_|, because we know there are no free chunks before.
size_t beg_bit = bit_hint_;
size_t curr_bit = bit_hint_;
while (true) {
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal that no free chunk was found.
size_t end_bit = beg_bit + need_bits;
if (end_bit > total_bits_)
return 0;
bool found = true;
for (; curr_bit < end_bit; ++curr_bit) {
if (alloc_bitset_.test(curr_bit)) {
// The bit was set, so this chunk isn't entirely free. Set |found=false|
// to ensure the outer loop continues. However, continue the inner loop
// to set |beg_bit| just past the last set bit in the investigated
// chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
// next outer loop pass from checking the same bits.
beg_bit = curr_bit + 1;
found = false;
if (bit_hint_ == curr_bit)
++bit_hint_;
}
}
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
// mark as allocated) and return the allocated address.
if (found) {
for (size_t i = beg_bit; i < end_bit; ++i) {
PA_DCHECK(!alloc_bitset_.test(i));
alloc_bitset_.set(i);
}
if (bit_hint_ == beg_bit) {
bit_hint_ = end_bit;
}
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if DCHECK_IS_ON()
PA_DCHECK(address + requested_size <= address_end_);
#endif
return address;
}
}
PA_NOTREACHED();
return 0;
}
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
size_t requested_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
const size_t need_bits = requested_size / kSuperPageSize;
const size_t end_bit = begin_bit + need_bits;
// Check that requested address is not too high.
if (end_bit > total_bits_)
return false;
// Check if any bit of the requested region is set already.
for (size_t i = begin_bit; i < end_bit; ++i) {
if (alloc_bitset_.test(i))
return false;
}
// Otherwise, set the bits.
for (size_t i = begin_bit; i < end_bit; ++i) {
alloc_bitset_.set(i);
}
return true;
}
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
ScopedGuard scoped_lock(lock_);
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
PA_DCHECK(address_begin_ <= address);
#if DCHECK_IS_ON()
PA_DCHECK(address + free_size <= address_end_);
#endif
const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
const size_t end_bit = beg_bit + free_size / kSuperPageSize;
for (size_t i = beg_bit; i < end_bit; ++i) {
PA_DCHECK(alloc_bitset_.test(i));
alloc_bitset_.reset(i);
}
bit_hint_ = std::min(bit_hint_, beg_bit);
}
void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
std::bitset<kMaxSuperPagesInPool> pages;
size_t i;
{
ScopedGuard scoped_lock(lock_);
pages = alloc_bitset_;
i = bit_hint_;
}
stats->usage = pages.count();
size_t largest_run = 0;
size_t current_run = 0;
for (; i < total_bits_; ++i) {
if (!pages[i]) {
current_run += 1;
continue;
} else if (current_run > largest_run) {
largest_run = current_run;
}
current_run = 0;
}
// Fell out of the loop with last bit being zero. Check once more.
if (current_run > largest_run) {
largest_run = current_run;
}
stats->largest_available_reservation = largest_run;
}
AddressPoolManager::Pool::Pool() = default;
AddressPoolManager::Pool::~Pool() = default;
void AddressPoolManager::GetPoolStats(const pool_handle handle,
PoolStats* stats) {
Pool* pool = GetPool(handle);
if (!pool->IsInitialized()) {
return;
}
pool->GetStats(stats);
}
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 64-bit pool stats.
GetPoolStats(GetRegularPool(), &stats->regular_pool_stats);
#if BUILDFLAG(USE_BACKUP_REF_PTR)
GetPoolStats(GetBRPPool(), &stats->brp_pool_stats);
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
if (IsConfigurablePoolAvailable()) {
GetPoolStats(GetConfigurablePool(), &stats->configurable_pool_stats);
}
return true;
}
#else // defined(PA_HAS_64_BITS_POINTERS)
static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
0,
"kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
"kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
"kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
"kGuardOffsetOfBRPPoolBitmap.");
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
size_t start_bit,
size_t bit_length) {
const size_t end_bit = start_bit + bit_length;
PA_DCHECK(start_bit <= bitsize);
PA_DCHECK(end_bit <= bitsize);
for (size_t i = start_bit; i < end_bit; ++i) {
PA_DCHECK(!bitmap.test(i));
bitmap.set(i);
}
}
template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
size_t start_bit,
size_t bit_length) {
const size_t end_bit = start_bit + bit_length;
PA_DCHECK(start_bit <= bitsize);
PA_DCHECK(end_bit <= bitsize);
for (size_t i = start_bit; i < end_bit; ++i) {
PA_DCHECK(bitmap.test(i));
bitmap.reset(i);
}
}
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
uintptr_t address = AllocPages(requested_address, length, kSuperPageSize,
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc);
return address;
}
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length) {
PA_DCHECK(!(address & kSuperPageOffsetMask));
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
FreePages(address, length);
}
void AddressPoolManager::MarkUsed(pool_handle handle,
uintptr_t address,
size_t length) {
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
if (handle == kBRPPoolHandle) {
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
    // Make IsManagedByBRPPool() return false when an address inside the
// first or the last PartitionPageSize()-bytes block is given:
//
// ------+---+---------------+---+----
// memory ..... | B | managed by PA | B | ...
// regions ------+---+---------------+---+----
//
// B: PartitionPageSize()-bytes block. This is used internally by the
// allocator and is not available for callers.
//
    // This is required to avoid a crash caused by the following code:
// {
// // Assume this allocation happens outside of PartitionAlloc.
// raw_ptr<T> ptr = new T[20];
// for (size_t i = 0; i < 20; i ++) { ptr++; }
// // |ptr| may point to an address inside 'B'.
// }
//
// Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByBRPPool(ptr) were to return true, ~raw_ptr<T>() would
// crash, since the memory is not allocated by PartitionAlloc.
SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
{
PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
0);
SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
}
}
void AddressPoolManager::MarkUnused(pool_handle handle,
uintptr_t address,
size_t length) {
// Address regions allocated for normal buckets are never released, so this
// function can only be called for direct map. However, do not DCHECK on
// IsManagedByDirectMap(address), because many tests test this function using
// small allocations.
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
if (handle == kBRPPoolHandle) {
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
    // Make IsManagedByBRPPool() return false when an address inside the
// first or the last PartitionPageSize()-bytes block is given.
// (See MarkUsed comment)
ResetBitmap(
AddressPoolManagerBitmap::brp_pool_bits_,
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
} else
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
{
PA_DCHECK(handle == kRegularPoolHandle);
PA_DCHECK(
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
0);
ResetBitmap(
AddressPoolManagerBitmap::regular_pool_bits_,
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
}
}
void AddressPoolManager::ResetForTesting() {
ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
AddressPoolManagerBitmap::regular_pool_bits_.reset();
AddressPoolManagerBitmap::brp_pool_bits_.reset();
}
namespace {
// Counts super pages in use represented by `bitmap`.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
const size_t bits_per_super_page) {
size_t count = 0;
size_t bit_index = 0;
// Stride over super pages.
for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
// Stride over the bits comprising the super page.
for (bit_index = super_page_index * bits_per_super_page;
bit_index < (super_page_index + 1) * bits_per_super_page &&
bit_index < bitsize;
++bit_index) {
if (bitmap[bit_index]) {
count += 1;
// Move on to the next super page.
break;
}
}
}
return count;
}
} // namespace
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
{
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
} // scoped_lock
// Pool usage is read out from the address pool bitmaps.
// The output stats are sized in super pages, so we interpret
// the bitmaps into super page usage.
static_assert(
kSuperPageSize %
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
0,
"information loss when calculating metrics");
constexpr size_t kRegularPoolBitsPerSuperPage =
kSuperPageSize /
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
// Get 32-bit pool usage.
stats->regular_pool_stats.usage =
CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(USE_BACKUP_REF_PTR)
static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
0,
"information loss when calculating metrics");
constexpr size_t kBRPPoolBitsPerSuperPage =
kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
stats->brp_pool_stats.usage =
CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);
// Get blocklist size.
for (const auto& blocked :
AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
if (blocked.load(std::memory_order_relaxed))
stats->blocklist_size += 1;
}
// Count failures in finding non-blocklisted addresses.
stats->blocklist_hit_count =
AddressPoolManagerBitmap::blocklist_hit_count_.load(
std::memory_order_relaxed);
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
return true;
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{};
if (GetStats(&stats)) {
dumper->DumpStats(&stats);
}
}
AddressPoolManager::AddressPoolManager() = default;
AddressPoolManager::~AddressPoolManager() = default;
} // namespace partition_alloc::internal

View File

@ -0,0 +1,189 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#include <bitset>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
namespace base {
template <typename Type>
struct LazyInstanceTraitsBase;
} // namespace base
namespace partition_alloc {
class AddressSpaceStatsDumper;
struct AddressSpaceStats;
struct PoolStats;
} // namespace partition_alloc
namespace partition_alloc::internal {
// (64bit version)
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 3 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAllocBRPPool and
// IsManagedByPartitionAllocRegularPool use the bitmaps to judge whether a given
// address is in a pool that supports BackupRefPtr or in a pool that doesn't.
// All PartitionAlloc allocations must be in either of the pools.
class BASE_EXPORT AddressPoolManager {
public:
static AddressPoolManager* GetInstance();
AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if defined(PA_HAS_64_BITS_POINTERS)
pool_handle Add(uintptr_t address, size_t length);
void Remove(pool_handle handle);
// Populate a |used| bitset of superpages currently in use.
void GetPoolUsedSuperPages(pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used);
// Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif
// Reserves address space from GigaCage.
uintptr_t Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length);
// Frees address space back to GigaCage and decommits underlying system pages.
void UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length);
void ResetForTesting();
#if !defined(PA_HAS_64_BITS_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
static bool IsManagedByRegularPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}
static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}
#endif // !defined(PA_HAS_64_BITS_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper);
private:
friend class AddressPoolManagerForTesting;
AddressPoolManager();
~AddressPoolManager();
// Populates `stats` if applicable.
// Returns whether `stats` was populated. (They might not be, e.g.
// if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats);
#if defined(PA_HAS_64_BITS_POINTERS)
class Pool {
public:
Pool();
~Pool();
void Initialize(uintptr_t ptr, size_t length);
bool IsInitialized();
void Reset();
uintptr_t FindChunk(size_t size);
void FreeChunk(uintptr_t address, size_t size);
bool TryReserveChunk(uintptr_t address, size_t size);
void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
uintptr_t GetBaseAddress();
void GetStats(PoolStats* stats);
private:
Lock lock_;
// The bitset stores the allocation state of the address pool. 1 bit per
// super-page: 1 = allocated, 0 = free.
std::bitset<kMaxSuperPagesInPool> alloc_bitset_ GUARDED_BY(lock_);
    // An index of a bit in the bitset before which we know for sure all bits
    // are 1. This is a best-effort hint in the sense that there still may be lots
// of 1s after this index, but at least we know there is no point in
// starting the search before it.
size_t bit_hint_ GUARDED_BY(lock_);
size_t total_bits_ = 0;
uintptr_t address_begin_ = 0;
#if DCHECK_IS_ON()
uintptr_t address_end_ = 0;
#endif
};
ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
return &pools_[handle - 1];
}
// Gets the stats for the pool identified by `handle`, if
// initialized.
void GetPoolStats(pool_handle handle, PoolStats* stats);
Pool pools_[kNumPools];
#endif // defined(PA_HAS_64_BITS_POINTERS)
friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
};
ALWAYS_INLINE pool_handle GetRegularPool() {
return kRegularPoolHandle;
}
ALWAYS_INLINE pool_handle GetBRPPool() {
return kBRPPoolHandle;
}
ALWAYS_INLINE pool_handle GetConfigurablePool() {
PA_DCHECK(IsConfigurablePoolAvailable());
return kConfigurablePoolHandle;
}
} // namespace partition_alloc::internal
namespace base::internal {
using ::partition_alloc::internal::AddressPoolManager;
using ::partition_alloc::internal::GetBRPPool;
using ::partition_alloc::internal::GetConfigurablePool;
using ::partition_alloc::internal::GetRegularPool;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

View File

@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc::internal {
namespace {
Lock g_lock;
} // namespace
Lock& AddressPoolManagerBitmap::GetLock() {
return g_lock;
}
std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock())
#if BUILDFLAG(USE_BACKUP_REF_PTR)
std::array<std::atomic_bool,
AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
} // namespace partition_alloc::internal
#endif // !defined(PA_HAS_64_BITS_POINTERS)

View File

@ -0,0 +1,205 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#include <array>
#include <atomic>
#include <bitset>
#include <limits>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc {
namespace internal {
// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class BASE_EXPORT AddressPoolManagerBitmap {
public:
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
// For BRP pool, we use partition page granularity to eliminate the guard
// pages from the bitmap at the ends:
// - Eliminating the guard page at the beginning is needed so that pointers
// to the end of an allocation that immediately precede a super page in BRP
// pool don't accidentally fall into that pool.
// - Eliminating the guard page at the end is to ensure that the last page
// of the address space isn't in the BRP pool. This allows using sentinels
// like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
// on an invalid address. (Note, 64-bit systems don't have this problem as
// the upper half of the address space always belongs to the OS.)
//
// Note, direct map allocations also belong to this pool. The same logic as
// above applies. It is important to note, however, that the granularity used
// here has to be a minimum of partition page size and direct map allocation
// granularity. Since DirectMapAllocationGranularity() is no smaller than
// PageAllocationGranularity(), we don't need to decrease the bitmap
// granularity any further.
static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
"");
static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
static constexpr size_t kBRPPoolBits =
kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
// Regular pool may include both normal bucket and direct map allocations, so
// the bitmap granularity has to be at least as small as
// DirectMapAllocationGranularity(). No need to eliminate guard pages at the
// ends, as this is a BackupRefPtr-specific concern, hence no need to lower
// the granularity to partition page size.
static constexpr size_t kBitShiftOfRegularPoolBitmap =
DirectMapAllocationGranularityShift();
static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
DirectMapAllocationGranularity();
static_assert(kBytesPer1BitOfRegularPoolBitmap ==
1 << kBitShiftOfRegularPoolBitmap,
"");
static constexpr size_t kRegularPoolBits =
kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
// Returns false for nullptr.
static bool IsManagedByRegularPool(uintptr_t address) {
static_assert(
std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
regular_pool_bits_.size(),
"The bitmap is too small, will result in unchecked out of bounds "
"accesses.");
// It is safe to read |regular_pool_bits_| without a lock since the caller
// is responsible for guaranteeing that the address is inside a valid
// allocation and the deallocation call won't race with this call.
return TS_UNCHECKED_READ(
regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
}
// Returns false for nullptr.
static bool IsManagedByBRPPool(uintptr_t address) {
static_assert(std::numeric_limits<uintptr_t>::max() >>
kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
"The bitmap is too small, will result in unchecked out of "
"bounds accesses.");
// It is safe to read |brp_pool_bits_| without a lock since the caller
// is responsible for guaranteeing that the address is inside a valid
// allocation and the deallocation call won't race with this call.
return TS_UNCHECKED_READ(
brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
}
#if BUILDFLAG(USE_BACKUP_REF_PTR)
static void BanSuperPageFromBRPPool(uintptr_t address) {
brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
true, std::memory_order_relaxed);
}
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
// The only potentially dangerous scenario, in which this check is used, is
// when the assignment of the first raw_ptr<T> object for a non-GigaCage
    // address is racing with the allocation of a new GigaCage super-page at the
// same address. We assume that if raw_ptr<T> is being initialized with a
// raw pointer, the associated allocation is "alive"; otherwise, the issue
// should be fixed by rewriting the raw pointer variable as raw_ptr<T>.
// In the worst case, when such a fix is impossible, we should just undo the
// raw pointer -> raw_ptr<T> rewrite of the problematic field. If the
// above assumption holds, the existing allocation will prevent us from
// reserving the super-page region and, thus, having the race condition.
// Since we rely on that external synchronization, the relaxed memory
// ordering should be sufficient.
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
std::memory_order_relaxed);
}
static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
private:
friend class AddressPoolManager;
static Lock& GetLock();
static std::bitset<kRegularPoolBits> regular_pool_bits_ GUARDED_BY(GetLock());
static std::bitset<kBRPPoolBits> brp_pool_bits_ GUARDED_BY(GetLock());
#if BUILDFLAG(USE_BACKUP_REF_PTR)
static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
brp_forbidden_super_page_map_;
static std::atomic_size_t blocklist_hit_count_;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
};
} // namespace internal
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
// No need to add IsManagedByConfigurablePool, because Configurable Pool
// doesn't exist on 32-bit.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|| internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif
;
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
uintptr_t address) {
// The Configurable Pool is only available on 64-bit builds.
return false;
}
ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
// The Configurable Pool is only available on 64-bit builds.
return false;
}
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::IsConfigurablePoolAvailable;
using ::partition_alloc::IsManagedByPartitionAlloc;
using ::partition_alloc::IsManagedByPartitionAllocBRPPool;
using ::partition_alloc::IsManagedByPartitionAllocConfigurablePool;
using ::partition_alloc::IsManagedByPartitionAllocRegularPool;
namespace internal {
using ::partition_alloc::internal::AddressPoolManagerBitmap;
} // namespace internal
} // namespace base
#endif // !defined(PA_HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

View File

@ -0,0 +1,22 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
namespace partition_alloc::internal {
using pool_handle = unsigned;
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::pool_handle;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

View File

@ -0,0 +1,66 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h> // Must be in front of other Windows header files.
#include <versionhelpers.h>
#endif
namespace partition_alloc {
uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if defined(ARCH_CPU_64_BITS)
random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue());
// The ASLRMask() and ASLROffset() constants will be suitable for the
// OS and build configuration.
#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Windows >= 8.1 has the full 47 bits. Use them where available.
static bool windows_81 = false;
static bool windows_81_initialized = false;
if (!windows_81_initialized) {
windows_81 = IsWindows8Point1OrGreater();
windows_81_initialized = true;
}
if (!windows_81) {
random &= internal::ASLRMaskBefore8_10();
} else {
random &= internal::ASLRMask();
}
random += internal::ASLROffset();
#else
random &= internal::ASLRMask();
random += internal::ASLROffset();
#endif // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else // defined(ARCH_CPU_32_BITS)
#if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it.
// TODO(palmer): Just dump the randomization when HE-ASLR is present.
static BOOL is_wow64 = -1;
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE;
if (!is_wow64)
return 0;
#endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask();
random += internal::ASLROffset();
#endif // defined(ARCH_CPU_32_BITS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random;
}
} // namespace partition_alloc

View File

@ -0,0 +1,268 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace partition_alloc {
// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
BASE_EXPORT uintptr_t GetRandomPageBase();
namespace internal {
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) {
return mask & PageAllocationGranularityBaseMask();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL);
}
// Turn off formatting, because the thicket of nested ifdefs below is
// incomprehensible without indentation. It is also incomprehensible with
// indentation, but the only other option is a combinatorial explosion of
// *_{win,linux,mac,foo}_{32,64}.h files.
//
// clang-format off
#if defined(ARCH_CPU_64_BITS)
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// We shouldn't allocate system pages at all for sanitizer builds. However,
// we do, and if random hint addresses interfere with address ranges
// hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See
// https://crbug.com/539863.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrAddress(0x007fffffffffULL);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x7e8000000000ULL);
}
#elif BUILDFLAG(IS_WIN)
    // Windows 8.1 and newer support the full 48-bit address range. Older
// versions of Windows only support 44 bits. Since ASLROffset() is non-zero
// and may cause a carry, use 47 and 43 bit masks. See
// http://www.alex-ionescu.com/?p=246
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(47);
}
constexpr ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
return AslrMask(43);
}
// Try not to map pages into the range where Windows loads DLLs by default.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return 0x80000000ULL;
}
#elif BUILDFLAG(IS_APPLE)
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
// is destroyed. Using a virtual address space that is too large causes a
// leak of about 1 wired [can never be paged out] page per call to mmap. The
// page is only reclaimed when the process is killed. Confine the hint to a
// 39-bit section of the virtual address space.
//
// This implementation adapted from
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
// is that here we clamp to 39 bits, not 32.
//
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#if defined(ARCH_CPU_X86_64)
// Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#elif defined(ARCH_CPU_ARM64)
#if BUILDFLAG(IS_ANDROID)
// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#else
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(38);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
#endif
#elif defined(ARCH_CPU_PPC64)
#if BUILDFLAG(IS_AIX)
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x400000000000ULL);
}
#elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(42);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
#elif defined(ARCH_CPU_S390X)
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(40);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#elif defined(ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(29);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
#if BUILDFLAG(IS_SOLARIS)
// For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third
// quarter). Because we do not MAP_FIXED, this will be treated only as a
// hint -- the system will not fail to mmap because something else
// happens to already be mapped at our random address. We deliberately
// set the hint high enough to get well above the system's break (that
// is, the heap); Solaris and illumos will try the hint and if that
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x80000000ULL);
}
#elif BUILDFLAG(IS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x90000000ULL);
}
#else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
#endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
#endif // BUILDFLAG(IS_POSIX)
#elif defined(ARCH_CPU_32_BITS)
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
#else
#error Please tell us about your exotic hardware! Sounds interesting.
#endif // defined(ARCH_CPU_32_BITS)
// clang-format on
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_

View File

@ -0,0 +1,52 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
#include <cstddef>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
namespace partition_alloc {
// All members are measured in super pages.
struct PoolStats {
size_t usage = 0;
// On 32-bit, GigaCage is mainly a logical entity, intermingled with
// allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case.
#if defined(PA_HAS_64_BITS_POINTERS)
size_t largest_available_reservation = 0;
#endif // defined(PA_HAS_64_BITS_POINTERS)
};
struct AddressSpaceStats {
PoolStats regular_pool_stats;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
PoolStats brp_pool_stats;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
#if defined(PA_HAS_64_BITS_POINTERS)
PoolStats configurable_pool_stats;
#else
#if BUILDFLAG(USE_BACKUP_REF_PTR)
size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
#endif // defined(PA_HAS_64_BITS_POINTERS)
};
// Interface passed to `AddressPoolManager::DumpStats()` to mediate
// for `AddressSpaceDumpProvider`.
class BASE_EXPORT AddressSpaceStatsDumper {
public:
virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
};
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_

View File

@ -0,0 +1,41 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/immediate_crash.h"
#if defined(PA_HAS_ALLOCATION_GUARD)
namespace partition_alloc {
namespace {
thread_local bool g_disallow_allocations;
} // namespace
ScopedDisallowAllocations::ScopedDisallowAllocations() {
if (g_disallow_allocations)
IMMEDIATE_CRASH();
g_disallow_allocations = true;
}
ScopedDisallowAllocations::~ScopedDisallowAllocations() {
g_disallow_allocations = false;
}
ScopedAllowAllocations::ScopedAllowAllocations() {
// Save the previous value, as ScopedAllowAllocations is used in all
  // partitions, not just the malloc() one(s).
saved_value_ = g_disallow_allocations;
g_disallow_allocations = false;
}
ScopedAllowAllocations::~ScopedAllowAllocations() {
g_disallow_allocations = saved_value_;
}
} // namespace partition_alloc
#endif // defined(PA_HAS_ALLOCATION_GUARD)

View File

@ -0,0 +1,48 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"
namespace partition_alloc {
#if defined(PA_HAS_ALLOCATION_GUARD)
// Disallow allocations in the scope. Does not nest.
class ScopedDisallowAllocations {
public:
ScopedDisallowAllocations();
~ScopedDisallowAllocations();
};
// Allow allocations in the scope, even if they are disallowed by an enclosing
// ScopedDisallowAllocations; the previous state is restored on destruction.
class ScopedAllowAllocations {
public:
ScopedAllowAllocations();
~ScopedAllowAllocations();
private:
bool saved_value_;
};
#else
struct [[maybe_unused]] ScopedDisallowAllocations {};
struct [[maybe_unused]] ScopedAllowAllocations {};
#endif // defined(PA_HAS_ALLOCATION_GUARD)
} // namespace partition_alloc
namespace base::internal {
using ::partition_alloc::ScopedAllowAllocations;
using ::partition_alloc::ScopedDisallowAllocations;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_

View File

@ -0,0 +1,50 @@
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy the code between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
# Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.
.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end
arm_bti_test_function:
# Mark the start of this function as a valid call target.
bti jc
add x0, x0, #1
arm_bti_test_function_invalid_offset:
# This label simulates calling an incomplete function.
# Jumping here should crash systems which support BTI.
add x0, x0, #2
ret
arm_bti_test_function_end:
nop
// For details see section "6.2 Program Property" in
// "ELF for the Arm 64-bit Architecture (AArch64)"
// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property
.pushsection .note.gnu.property, "a";
.balign 8;
.long 4;
.long 0x10;
.long 0x5;
.asciz "GNU";
.long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4;
.long 1; /* GNU_PROPERTY_AARCH64_BTI */;
.long 0;
.popsection

View File

@ -0,0 +1,31 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#include "build/build_config.h"
#if defined(ARCH_CPU_ARM64)
extern "C" {
/**
 * A valid BTI function. Jumping to this function should not cause any problem in
* a BTI enabled environment.
**/
int64_t arm_bti_test_function(int64_t);
/**
* A function without proper BTI landing pad. Jumping here should crash the
* program on systems which support BTI.
**/
int64_t arm_bti_test_function_invalid_offset(int64_t);
/**
* A simple function which immediately returns to sender.
**/
void arm_bti_test_function_end(void);
}
#endif // defined(ARCH_CPU_ARM64)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_

View File

@ -0,0 +1,47 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/base_export.h"
namespace partition_alloc {
namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
} // namespace
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
PA_DCHECK(g_dangling_raw_ptr_detected_fn);
return g_dangling_raw_ptr_detected_fn;
}
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn() {
PA_DCHECK(g_dangling_raw_ptr_released_fn);
return g_dangling_raw_ptr_released_fn;
}
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
PA_DCHECK(fn);
g_dangling_raw_ptr_detected_fn = fn;
}
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
PA_DCHECK(fn);
g_dangling_raw_ptr_released_fn = fn;
}
namespace internal {
BASE_EXPORT void DanglingRawPtrDetected(uintptr_t id) {
g_dangling_raw_ptr_detected_fn(id);
}
BASE_EXPORT void DanglingRawPtrReleased(uintptr_t id) {
g_dangling_raw_ptr_released_fn(id);
}
} // namespace internal
} // namespace partition_alloc

View File

@ -0,0 +1,52 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
#include <cstdint>
#include "base/base_export.h"
// When compiled with build flags `enable_dangling_raw_ptr_checks`, dangling
// raw_ptr are reported. Its behavior can be configured here.
//
// Purpose of this level of indirection:
// - Ease testing.
// - Keep partition_alloc/ independent from base/. In most cases, when a
// dangling raw_ptr is detected/released, this involves recording a
// base::debug::StackTrace, which isn't desirable inside partition_alloc/.
// - Be able (potentially) to turn this feature on/off at runtime based on
//   the dependent's flags.
namespace partition_alloc {
// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
// memory region and the allocator is asked to release it.
//
// It won't be called again with the same `id`, up until (potentially) a call to
// DanglingRawPtrReleased(`id`) is made.
//
// This function is called from within the allocator, and is not allowed to
// allocate memory.
using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
BASE_EXPORT DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
BASE_EXPORT void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region.
//
// This function is allowed to allocate memory.
using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
BASE_EXPORT DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
BASE_EXPORT void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);
namespace internal {
BASE_EXPORT void DanglingRawPtrDetected(uintptr_t id);
BASE_EXPORT void DanglingRawPtrReleased(uintptr_t id);
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_

View File

@ -0,0 +1,91 @@
digraph G {
graph[bgcolor=transparent]
node[shape=plaintext]
edge[style=dashed]
invisible_a[label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR>
<TD PORT="red" WIDTH="100"></TD>
<TD PORT="green" WIDTH="20"></TD>
<TD PORT="blue" WIDTH="40"></TD>
<TD PORT="gold" WIDTH="300"></TD>
<TD PORT="pink" WIDTH="60"></TD>
</TR>
</TABLE>
>]
superpage[xlabel="Super Page",label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
<TR>
<!-- Head Partition Page -->
<TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
<TD PORT="metadata"></TD>
<TD BGCOLOR="darkgrey" WIDTH="18"></TD>
<!-- Several Slot Spans -->
<TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
<TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
<TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
<TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
<TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
<TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
<TD WIDTH="79">...</TD>
<!-- Tail Partition Page -->
<TD BGCOLOR="darkgrey" WIDTH="39"></TD>
</TR>
</TABLE>
>]
invisible_b[label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR>
<TD PORT="green" WIDTH="30"></TD>
<TD PORT="blue" WIDTH="60"></TD>
<TD PORT="gold" WIDTH="180"></TD>
<TD PORT="red" WIDTH="90"></TD>
<TD PORT="pink" WIDTH="90"></TD>
</TR>
</TABLE>
>]
metadata_page[xlabel="Metadata",label=<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<!-- Guard Page Metadata -->
<TD BGCOLOR="darkgrey"> </TD>
<!-- Red Slot Span Metadata -->
<TD BGCOLOR="crimson">v</TD>
<TD BGCOLOR="crimson">+</TD>
<TD BGCOLOR="crimson">+</TD>
<!-- Green Slot Span Metadata -->
<TD BGCOLOR="palegreen">v</TD>
<!-- Blue Slot Span Metadata -->
<TD BGCOLOR="cornflowerblue">v</TD>
<TD BGCOLOR="cornflowerblue">+</TD>
<!-- Gold Slot Span Metadata -->
<TD BGCOLOR="gold">v</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<TD BGCOLOR="gold">+</TD>
<!-- Red Slot Span Metadata -->
<TD BGCOLOR="crimson">v</TD>
<TD BGCOLOR="crimson">+</TD>
<TD BGCOLOR="crimson">+</TD>
<!-- Pink Slot Span Metadata -->
<TD BGCOLOR="deeppink">v</TD>
<!-- etc. -->
<TD WIDTH="64">...</TD>
<!-- Guard Page Metadata -->
<TD BGCOLOR="darkgrey"> </TD>
</TR>
</TABLE>
>]
invisible_a:red->superpage:red->superpage:red2[color=crimson]
superpage:red2->invisible_b:red[color=crimson]
invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]
superpage:metadata->metadata_page[style="",arrowhead=odot]
}

Binary file not shown.


View File

@ -0,0 +1,86 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace base {
#if defined(PA_THREAD_CACHE_SUPPORTED)
namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been
// disabled.
if (!root || !root->with_thread_cache)
return;
internal::ThreadCacheRegistry::Instance().PurgeAll();
root->with_thread_cache = false;
// Doesn't destroy the thread cache object(s). For background threads, they
// will be collected (and free cached memory) at thread destruction
// time. For the main thread, we leak it.
}
void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) {
if (!root)
return;
root->with_thread_cache = true;
}
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void DisablePartitionAllocThreadCacheForProcess() {
auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
auto* aligned_allocator = internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator)
DisableThreadCacheForRootIfEnabled(aligned_allocator);
DisableThreadCacheForRootIfEnabled(
internal::PartitionAllocMalloc::OriginalAllocator());
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
DisablePartitionAllocThreadCacheForProcess();
#else
PA_CHECK(!internal::ThreadCache::IsValid(internal::ThreadCache::Get()));
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
internal::ThreadCache::SwapForTesting(root);
EnablePartitionAllocThreadCacheForRootIfDisabled(root);
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
internal::ThreadCache::SwapForTesting(regular_allocator);
#else
internal::ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}
} // namespace base

View File

@ -0,0 +1,27 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"
namespace base {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
BASE_EXPORT void SwapOutProcessThreadCacheForTesting(
ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
BASE_EXPORT void SwapInProcessThreadCacheForTesting(
ThreadSafePartitionRoot* root);
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
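A minimal usage sketch for the two helpers above; the wrapper function, its arguments, and the test body are illustrative assumptions, not part of this header:
// Sketch: run a test body while this thread's cache is backed by |test_root|.
void RunBodyWithDedicatedThreadCache(base::ThreadSafePartitionRoot* test_root,
                                     void (*body)()) {
  // Route the thread cache to |test_root| (and disable the process-wide
  // caches when PartitionAlloc backs malloc).
  base::SwapOutProcessThreadCacheForTesting(test_root);
  body();  // Allocations from |test_root| on this thread now hit its cache.
  // Disable the test thread cache and restore the process default.
  base::SwapInProcessThreadCacheForTesting(test_root);
}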

View File

@ -0,0 +1,96 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/no_destructor.h"
// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
// cause significant jank.
#define PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM 0
namespace partition_alloc {
// static
MemoryReclaimer* MemoryReclaimer::Instance() {
static base::NoDestructor<MemoryReclaimer> instance;
return instance.get();
}
void MemoryReclaimer::RegisterPartition(PartitionRoot<>* partition) {
internal::ScopedGuard lock(lock_);
PA_DCHECK(partition);
auto it_and_whether_inserted = partitions_.insert(partition);
PA_DCHECK(it_and_whether_inserted.second);
}
void MemoryReclaimer::UnregisterPartition(
PartitionRoot<internal::ThreadSafe>* partition) {
internal::ScopedGuard lock(lock_);
PA_DCHECK(partition);
size_t erased_count = partitions_.erase(partition);
PA_DCHECK(erased_count == 1u);
}
MemoryReclaimer::MemoryReclaimer() = default;
MemoryReclaimer::~MemoryReclaimer() = default;
void MemoryReclaimer::ReclaimAll() {
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages |
PurgeFlags::kAggressiveReclaim;
Reclaim(kFlags);
}
void MemoryReclaimer::ReclaimNormal() {
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages;
Reclaim(kFlags);
}
void MemoryReclaimer::Reclaim(int flags) {
internal::ScopedGuard lock(
lock_); // Has to protect from concurrent (Un)Register calls.
// PCScan quarantines freed slots. Trigger the scan first to let it call
// FreeNoHooksImmediate on slots that pass the quarantine.
//
// In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
// so that the slots are actually freed. (This is done synchronously only for
// the current thread.)
//
// Lastly, decommit empty slot spans, and try to discard unused pages at the
// end of the remaining active slots.
#if PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM
{
using PCScan = internal::PCScan;
const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
? PCScan::InvocationMode::kForcedBlocking
: PCScan::InvocationMode::kBlocking;
PCScan::PerformScanIfNeeded(invocation_mode);
}
#endif // PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM
#if defined(PA_THREAD_CACHE_SUPPORTED)
// Don't completely empty the thread cache outside of low memory situations,
// as there is periodic purge which makes sure that it doesn't take too much
// space.
if (flags & PurgeFlags::kAggressiveReclaim)
base::internal::ThreadCacheRegistry::Instance().PurgeAll();
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
for (auto* partition : partitions_)
partition->PurgeMemory(flags);
}
void MemoryReclaimer::ResetForTesting() {
internal::ScopedGuard lock(lock_);
partitions_.clear();
}
} // namespace partition_alloc

View File

@ -0,0 +1,81 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#include <memory>
#include <set>
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/no_destructor.h"
#include "base/thread_annotations.h"
#include "base/time/time.h"
namespace partition_alloc {
// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// called from any thread, concurrently with reclaim. Reclaim itself runs in the
// context of the provided |SequencedTaskRunner|, meaning that the caller must
// take care of this runner being compatible with the various partitions.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.
class BASE_EXPORT MemoryReclaimer {
public:
static MemoryReclaimer* Instance();
MemoryReclaimer(const MemoryReclaimer&) = delete;
MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;
// Internal. Do not use.
// Registers a partition to be tracked by the reclaimer.
void RegisterPartition(PartitionRoot<>* partition);
// Internal. Do not use.
// Unregisters a partition to be tracked by the reclaimer.
void UnregisterPartition(PartitionRoot<>* partition);
// Triggers an explicit reclaim now to reclaim as much free memory as
// possible. The API callers need to invoke this method periodically
// if they want to use memory reclaimer.
// See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
void ReclaimNormal();
// Returns a recommended interval to invoke ReclaimNormal.
int64_t GetRecommendedReclaimIntervalInMicroseconds() {
return base::Seconds(4).InMicroseconds();
}
// Triggers an explicit reclaim now, reclaiming all free memory.
void ReclaimAll();
private:
MemoryReclaimer();
~MemoryReclaimer();
// |flags| is an OR of PurgeFlags values.
void Reclaim(int flags);
void ReclaimAndReschedule();
void ResetForTesting();
internal::Lock lock_;
std::set<PartitionRoot<>*> partitions_ GUARDED_BY(lock_);
friend class base::NoDestructor<MemoryReclaimer>;
friend class MemoryReclaimerTest;
};
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::MemoryReclaimer;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
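A hedged sketch of the periodic-call pattern described in the comments above; the function name is hypothetical and the scheduling mechanism is deliberately left to the embedder:
// Sketch: one periodic reclaim step. Something (a timer, a task runner, ...)
// is assumed to call this again after the recommended interval.
void PeriodicReclaimStep() {
  auto* reclaimer = ::partition_alloc::MemoryReclaimer::Instance();
  reclaimer->ReclaimNormal();
  const int64_t delay_us =
      reclaimer->GetRecommendedReclaimIntervalInMicroseconds();
  // Re-arm whatever drives this function after |delay_us| microseconds.
  (void)delay_us;
}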

View File

@ -0,0 +1,23 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/compiler_specific.h"
#include "base/immediate_crash.h"
#include "base/process/memory.h"
namespace partition_alloc::internal {
// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED OnNoMemory(size_t size) {
RunPartitionAllocOomCallback();
base::TerminateBecauseOutOfMemory(size);
IMMEDIATE_CRASH();
}
} // namespace partition_alloc::internal

View File

@ -0,0 +1,34 @@
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#include <cstddef>
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
namespace partition_alloc::internal {
// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] BASE_EXPORT void NOT_TAIL_CALLED OnNoMemory(size_t size);
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
// OOM_CRASH(size) is called by users of PageAllocator (including
// PartitionAlloc) to signify an allocation failure from the platform.
#define OOM_CRASH(size) \
do { \
/* Raising an exception might allocate, allow that. */ \
::partition_alloc::ScopedAllowAllocations guard{}; \
::partition_alloc::internal::OnNoMemory(size); \
} while (0)
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
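A small hedged sketch of the call pattern OOM_CRASH() is meant for; the allocation primitive shown is only a stand-in:
#include <cstdlib>
// Sketch: report a platform allocation failure of |size| bytes as an OOM.
void* AllocateOrOomCrash(size_t size) {
  void* ptr = std::malloc(size);  // Stand-in for a platform allocation call.
  if (!ptr)
    OOM_CRASH(size);  // Never returns; the stack classifies this as an OOM.
  return ptr;
}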

View File

@ -0,0 +1,27 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
namespace partition_alloc {
namespace {
PartitionAllocOomCallback g_oom_callback;
} // namespace
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
PA_DCHECK(!g_oom_callback);
g_oom_callback = callback;
}
namespace internal {
void RunPartitionAllocOomCallback() {
if (g_oom_callback)
g_oom_callback();
}
} // namespace internal
} // namespace partition_alloc

View File

@ -0,0 +1,26 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#include "base/base_export.h"
namespace partition_alloc {
using PartitionAllocOomCallback = void (*)();
// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
// allocation failure from the platform.
BASE_EXPORT void SetPartitionAllocOomCallback(
PartitionAllocOomCallback callback);
namespace internal {
BASE_EXPORT void RunPartitionAllocOomCallback();
} // namespace internal
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
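A minimal sketch of installing the single process-wide callback declared above; the callback body is an assumption:
namespace {
// The callback runs while the process is out of memory, so it must not
// allocate; here it only records the fact for a crash handler to inspect.
bool g_partition_alloc_oom_seen = false;
void OnPartitionAllocOom() {
  g_partition_alloc_oom_seen = true;
}
}  // namespace
void InstallPartitionAllocOomCallback() {
  // May only be called once per process (DCHECKed by the implementation).
  ::partition_alloc::SetPartitionAllocOomCallback(&OnPartitionAllocOom);
}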

View File

@ -0,0 +1,381 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include <atomic>
#include <cstdint>
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/bits.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif
#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
#elif BUILDFLAG(IS_POSIX)
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif BUILDFLAG(IS_FUCHSIA)
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif
namespace partition_alloc {
namespace {
internal::Lock g_reserve_lock;
// We may reserve/release address space on different threads.
internal::Lock& GetReserveLock() {
return g_reserve_lock;
}
std::atomic<size_t> g_total_mapped_address_space;
// We only support a single block of reserved address space.
uintptr_t s_reservation_address GUARDED_BY(GetReserveLock()) = 0;
size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;
uintptr_t AllocPagesIncludingReserved(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
uintptr_t ret =
internal::SystemAllocPages(address, length, accessibility, page_tag);
if (!ret) {
const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address
// space and try once more.
ReleaseReservation();
ret =
internal::SystemAllocPages(address, length, accessibility, page_tag);
}
}
return ret;
}
// Trims memory at |base_address| to given |trim_length| and |alignment|.
//
// On Windows, this function returns 0 on failure and frees the memory at
// |base_address|.
uintptr_t TrimMapping(uintptr_t base_address,
size_t base_length,
size_t trim_length,
uintptr_t alignment,
uintptr_t alignment_offset,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(base_length >= trim_length);
PA_DCHECK(base::bits::IsPowerOfTwo(alignment));
PA_DCHECK(alignment_offset < alignment);
uintptr_t new_base =
NextAlignedWithOffset(base_address, alignment, alignment_offset);
PA_DCHECK(new_base >= base_address);
size_t pre_slack = new_base - base_address;
size_t post_slack = base_length - pre_slack - trim_length;
PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
PA_DCHECK(pre_slack < base_length);
PA_DCHECK(post_slack < base_length);
return internal::TrimMappingInternal(base_address, base_length, trim_length,
accessibility, pre_slack, post_slack);
}
} // namespace
// Align |address| up to the closest, non-smaller address, that gives
// |requested_offset| remainder modulo |alignment|.
//
// Examples for alignment=1024 and requested_offset=64:
// 64 -> 64
// 65 -> 1088
// 1024 -> 1088
// 1088 -> 1088
// 1089 -> 2112
// 2048 -> 2112
uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t alignment,
uintptr_t requested_offset) {
PA_DCHECK(base::bits::IsPowerOfTwo(alignment));
PA_DCHECK(requested_offset < alignment);
uintptr_t actual_offset = address & (alignment - 1);
uintptr_t new_address;
if (actual_offset <= requested_offset)
new_address = address + requested_offset - actual_offset;
else
new_address = address + alignment + requested_offset - actual_offset;
PA_DCHECK(new_address >= address);
PA_DCHECK(new_address - address < alignment);
PA_DCHECK(new_address % alignment == requested_offset);
return new_address;
}
namespace internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret =
internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
return ret;
}
} // namespace internal
uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
page_tag);
}
uintptr_t AllocPages(uintptr_t address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
page_tag);
}
void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return reinterpret_cast<void*>(
AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
accessibility, page_tag));
}
uintptr_t AllocPagesWithAlignOffset(
uintptr_t address,
size_t length,
size_t align,
size_t align_offset,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PA_DCHECK(length >= internal::PageAllocationGranularity());
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(align >= internal::PageAllocationGranularity());
// Alignment must be power of 2 for masking math to work.
PA_DCHECK(base::bits::IsPowerOfTwo(align));
PA_DCHECK(align_offset < align);
PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
// If the client passed null as the address, choose a good one.
if (!address) {
address = (GetRandomPageBase() & align_base_mask) + align_offset;
}
// First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
// On 32 bit systems, first try one random aligned address, and then try an
// aligned address derived from the value of |ret|.
constexpr int kExactSizeTries = 2;
#else
// On 64 bit systems, try 3 random aligned addresses.
constexpr int kExactSizeTries = 3;
#endif
for (int i = 0; i < kExactSizeTries; ++i) {
uintptr_t ret =
AllocPagesIncludingReserved(address, length, accessibility, page_tag);
if (ret) {
// If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset)
return ret;
// Free the memory and try again.
FreePages(ret, length);
} else {
// |ret| is null; if this try was unhinted, we're OOM.
if (internal::kHintIsAdvisory || !address)
return 0;
}
#if defined(ARCH_CPU_32_BITS)
// For small address spaces, try the first aligned address >= |ret|. Note
// |ret| may be null, in which case |address| becomes null. If
// |align_offset| is non-zero, this calculation may get us not the first,
// but the next matching address.
address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
#endif
}
// Make a larger allocation so we can force alignment.
size_t try_length = length + (align - internal::PageAllocationGranularity());
PA_CHECK(try_length >= length);
uintptr_t ret;
do {
// Continue randomizing only on POSIX.
address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
ret = AllocPagesIncludingReserved(address, try_length, accessibility,
page_tag);
// The retries are for Windows, where a race can steal our mapping on
// resize.
} while (ret && (ret = TrimMapping(ret, try_length, length, align,
align_offset, accessibility)) == 0);
return ret;
}
void FreePages(uintptr_t address, size_t length) {
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
internal::FreePagesInternal(address, length);
PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
}
void FreePages(void* address, size_t length) {
FreePages(reinterpret_cast<uintptr_t>(address), length);
}
bool TrySetSystemPagesAccess(uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
return internal::TrySetSystemPagesAccessInternal(address, length,
accessibility);
}
bool TrySetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
accessibility);
}
void SetSystemPagesAccess(uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DecommitSystemPagesInternal(address, length,
accessibility_disposition);
}
void DecommitSystemPages(
void* address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
accessibility_disposition);
}
void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DecommitAndZeroSystemPagesInternal(address, length);
}
void DecommitAndZeroSystemPages(void* address, size_t length) {
DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
void RecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
internal::RecommitSystemPagesInternal(address, length, accessibility,
accessibility_disposition);
}
bool TryRecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Duplicated because we want errors to be reported at a lower level in the
// crashing case.
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
return internal::TryRecommitSystemPagesInternal(
address, length, accessibility, accessibility_disposition);
}
void DiscardSystemPages(uintptr_t address, size_t length) {
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
internal::DiscardSystemPagesInternal(address, length);
}
void DiscardSystemPages(void* address, size_t length) {
DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address) {
uintptr_t mem = internal::SystemAllocPages(
0, size, PageAccessibilityConfiguration::kInaccessible,
PageTag::kChromium);
if (mem) {
// We guarantee this alignment when reserving address space.
PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
s_reservation_address = mem;
s_reservation_size = size;
return true;
}
}
return false;
}
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address)
return false;
FreePages(s_reservation_address, s_reservation_size);
s_reservation_address = 0;
s_reservation_size = 0;
return true;
}
bool HasReservationForTesting() {
internal::ScopedGuard guard(GetReserveLock());
return s_reservation_address;
}
uint32_t GetAllocPageErrorCode() {
return internal::s_allocPageErrorCode;
}
size_t GetTotalMappedSize() {
return g_total_mapped_address_space;
}
} // namespace partition_alloc

View File

@ -0,0 +1,348 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace partition_alloc {
enum class PageAccessibilityConfiguration {
kInaccessible,
kRead,
kReadWrite,
// This flag is mapped to kReadWrite on systems that
// don't support MTE.
kReadWriteTagged,
// This flag is mapped to kReadExecute on systems
// that don't support Arm's BTI.
kReadExecuteProtected,
kReadExecute,
// This flag is deprecated and will go away soon.
// TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
kReadWriteExecute,
};
// Use for De/RecommitSystemPages API.
enum class PageAccessibilityDisposition {
// Enforces permission update (Decommit will set to
// PageAccessibilityConfiguration::kInaccessible;
// Recommit will set to whatever was requested, other than
// PageAccessibilityConfiguration::kInaccessible).
kRequireUpdate,
// Will not update permissions if the platform supports that (POSIX & Fuchsia
// only).
kAllowKeepForPerf,
};
// macOS supports tagged memory regions, to help in debugging. On Android,
// these tags are used to name anonymous mappings.
enum class PageTag {
kFirst = 240, // Minimum tag value.
kBlinkGC = 252, // Blink GC pages.
kPartitionAlloc = 253, // PartitionAlloc, no matter the partition.
kChromium = 254, // Chromium page.
kV8 = 255, // V8 heap pages.
kLast = kV8 // Maximum tag value.
};
BASE_EXPORT uintptr_t NextAlignedWithOffset(uintptr_t ptr,
uintptr_t alignment,
uintptr_t requested_offset);
// Allocates one or more pages.
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned to |align_offset| modulo |align|
// bytes.
//
// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
// |align_offset| must be less than |align|. |align| must be a power of two.
//
// If |address| is 0/nullptr, then a suitable and randomized address will be
// chosen automatically.
//
// |accessibility| controls the permission of the allocated pages.
// PageAccessibilityConfiguration::kInaccessible means uncommitted.
//
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// This call will return 0/nullptr if the allocation cannot be satisfied.
BASE_EXPORT uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
BASE_EXPORT uintptr_t AllocPages(uintptr_t address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
BASE_EXPORT void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
BASE_EXPORT uintptr_t
AllocPagesWithAlignOffset(uintptr_t address,
size_t length,
size_t align,
size_t align_offset,
PageAccessibilityConfiguration page_accessibility,
PageTag page_tag);
// Frees one or more pages starting at |address| and continuing for |length|
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
// |address| must be aligned to |PageAllocationGranularity()| bytes, and
// |length| must be a multiple of |PageAllocationGranularity()|.
BASE_EXPORT void FreePages(uintptr_t address, size_t length);
BASE_EXPORT void FreePages(void* address, size_t length);
// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
[[nodiscard]] BASE_EXPORT bool TrySetSystemPagesAccess(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
[[nodiscard]] BASE_EXPORT bool TrySetSystemPagesAccess(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Performs a CHECK that the operation succeeds.
BASE_EXPORT void SetSystemPagesAccess(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
BASE_EXPORT void SetSystemPagesAccess(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate (recommended),
// the decommitted pages will be made inaccessible before the call returns.
// While it is always a programming error to access decommitted pages without
// first recommitting them, callers may use
// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
// to skip changing permissions (use with care), for performance reasons (see
// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
// in the past). Implementations may choose to always modify permissions, hence
// accessing those pages may or may not trigger a fault.
//
// Decommitting means that physical resources (RAM or swap/pagefile) backing the
// allocated virtual address range may be released back to the system, but the
// address space is still allocated to the process (possibly using up page table
// entries or other accounting resources). There is no guarantee that the pages
// are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
//
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
// processes will not fault when touching a committed memory region. There is
// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
// best-effort allocated resources on the first touch. If
// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
// behaves in a platform-agnostic way by simulating the Windows "decommit" state
// by both discarding the region (allowing the OS to avoid swap operations)
// *and* changing the page protections so accesses fault.
BASE_EXPORT void DecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition);
BASE_EXPORT void DecommitSystemPages(
void* address,
size_t length,
PageAccessibilityDisposition accessibility_disposition);
// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
// zeroed and will always mark the region as inaccessible (the equivalent of
// setting them to PageAccessibilityConfiguration::kInaccessible).
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void DecommitAndZeroSystemPages(uintptr_t address, size_t length);
BASE_EXPORT void DecommitAndZeroSystemPages(void* address, size_t length);
// Whether decommitted memory is guaranteed to be zeroed when it is
// recommitted. Do not assume that this will not change over time.
constexpr BASE_EXPORT bool DecommittedMemoryIsAlwaysZeroed() {
#if BUILDFLAG(IS_APPLE)
return false;
#else
return true;
#endif
}
// (Re)Commits one or more system pages, starting at |address| and continuing
// for |length| bytes with the given |page_accessibility| (must not be
// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
// must be aligned to a system page boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate, the call
// updates the pages to |page_accessibility|. This can be used regardless of
// what disposition was used to decommit the pages.
// PageAccessibilityDisposition::kAllowKeepForPerf allows the implementation
// to leave the page permissions unchanged, if that improves performance. This
// option can only be used if the pages were previously accessible and
// decommitted with that same option.
//
// The memory will be zeroed when it is committed for the first time. However,
// there is no such guarantee when memory is recommitted, unless
// |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
BASE_EXPORT void RecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition);
// Like RecommitSystemPages(), but returns false instead of crashing.
[[nodiscard]] BASE_EXPORT bool TryRecommitSystemPages(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition);
// Discard one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
// - Do nothing.
// - Discard the page immediately, freeing up physical pages.
// - Discard the page at some time in the future in response to memory
// pressure.
//
// Only committed pages should be discarded. Discarding a page does not decommit
// it, and it is valid to discard an already-discarded page. A read or write to
// a discarded page will not fault.
//
// Reading from a discarded page may return the original page content, or a page
// full of zeroes.
//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
BASE_EXPORT void DiscardSystemPages(uintptr_t address, size_t length);
BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
return (address + internal::SystemPageOffsetMask()) &
internal::SystemPageBaseMask();
}
// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
return address & internal::SystemPageBaseMask();
}
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
return (address + internal::PageAllocationGranularityOffsetMask()) &
internal::PageAllocationGranularityBaseMask();
}
// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
return address & internal::PageAllocationGranularityBaseMask();
}
// Reserves (at least) |size| bytes of address space, aligned to
// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already made.
BASE_EXPORT bool ReserveAddressSpace(size_t size);
// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
//
// Returns true when an existing reservation was released.
BASE_EXPORT bool ReleaseReservation();
// Returns true if there is currently an address space reservation.
BASE_EXPORT bool HasReservationForTesting();
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
BASE_EXPORT uint32_t GetAllocPageErrorCode();
// Returns the total amount of mapped pages from all clients of
// PageAllocator. These pages may or may not be committed. This is mostly useful
// to assess address space pressure.
BASE_EXPORT size_t GetTotalMappedSize();
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::AllocPages;
using ::partition_alloc::AllocPagesWithAlignOffset;
using ::partition_alloc::DecommitAndZeroSystemPages;
using ::partition_alloc::DecommitSystemPages;
using ::partition_alloc::DecommittedMemoryIsAlwaysZeroed;
using ::partition_alloc::DiscardSystemPages;
using ::partition_alloc::FreePages;
using ::partition_alloc::GetAllocPageErrorCode;
using ::partition_alloc::GetTotalMappedSize;
using ::partition_alloc::HasReservationForTesting;
using ::partition_alloc::NextAlignedWithOffset;
using ::partition_alloc::PageAccessibilityConfiguration;
using ::partition_alloc::PageAccessibilityDisposition;
using ::partition_alloc::PageTag;
using ::partition_alloc::RecommitSystemPages;
using ::partition_alloc::ReleaseReservation;
using ::partition_alloc::ReserveAddressSpace;
using ::partition_alloc::RoundDownToPageAllocationGranularity;
using ::partition_alloc::RoundDownToSystemPage;
using ::partition_alloc::RoundUpToPageAllocationGranularity;
using ::partition_alloc::RoundUpToSystemPage;
using ::partition_alloc::SetSystemPagesAccess;
using ::partition_alloc::TryRecommitSystemPages;
using ::partition_alloc::TrySetSystemPagesAccess;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
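A hedged round-trip sketch of the alloc / decommit / recommit / free flow documented above; the function name and the size choices are illustrative only:
void PageAllocatorRoundTrip() {
  using namespace ::partition_alloc;
  const size_t length = internal::PageAllocationGranularity();
  // Reserve and commit one granule of read-write memory at a random address.
  uintptr_t region = AllocPages(length, /*align=*/length,
                                PageAccessibilityConfiguration::kReadWrite,
                                PageTag::kChromium);
  if (!region)
    return;  // AllocPages() returns 0 when the request cannot be satisfied.
  // Release the physical backing while keeping the address range reserved.
  DecommitSystemPages(region, length,
                      PageAccessibilityDisposition::kRequireUpdate);
  // Make the pages accessible again before touching them.
  RecommitSystemPages(region, length,
                      PageAccessibilityConfiguration::kReadWrite,
                      PageAccessibilityDisposition::kRequireUpdate);
  FreePages(region, length);
}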

View File

@ -0,0 +1,134 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#include <stddef.h>
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
// Although page allocator constants are not constexpr, they are run-time
// constant. Because the underlying variables they access, such as vm_page_size,
// are not marked const, the compiler normally has no way to know that they
// don't change and must obtain their values whenever it can't prove that they
// haven't been modified, even if they had already been obtained previously.
// Attaching __attribute__((const)) to these declarations allows these redundant
// accesses to be omitted under optimization such as common subexpression
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#else
// When defined, page size constants are fixed at compile time. When not
// defined, they may vary at run time.
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
// Use this macro to declare a function as constexpr or not based on whether
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
#endif
namespace partition_alloc::internal {
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
// sizes. Since 64kB is the de facto standard on the platform and binaries
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON)
return 14; // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
return vm_page_shift;
#else
return 12; // 4kB
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
// This is literally equivalent to |1 << PageAllocationGranularityShift()|
// below, but was separated out for OS_APPLE to avoid << on a non-constexpr.
return vm_page_size;
#else
return 1 << PageAllocationGranularityShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
return PageAllocationGranularity() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
return ~PageAllocationGranularityOffsetMask();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageShift() {
// On Windows allocation granularity is higher than the page size. This comes
// into play when reserving address space range (allocation granularity),
// compared to committing pages into memory (system page granularity).
#if BUILDFLAG(IS_WIN)
return 12; // 4096=1<<12
#else
return PageAllocationGranularityShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageSize() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
// This is literally equivalent to |1 << SystemPageShift()| below, but was
// separated out for 64-bit OS_APPLE to avoid << on a non-constexpr.
return PageAllocationGranularity();
#else
return 1 << SystemPageShift();
#endif
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageOffsetMask() {
return SystemPageSize() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageBaseMask() {
return ~SystemPageOffsetMask();
}
constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page.
constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
} // namespace partition_alloc::internal
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::kPageMetadataShift;
using ::partition_alloc::internal::kPageMetadataSize;
using ::partition_alloc::internal::PageAllocationGranularity;
using ::partition_alloc::internal::PageAllocationGranularityBaseMask;
using ::partition_alloc::internal::PageAllocationGranularityOffsetMask;
using ::partition_alloc::internal::PageAllocationGranularityShift;
using ::partition_alloc::internal::SystemPageBaseMask;
using ::partition_alloc::internal::SystemPageOffsetMask;
using ::partition_alloc::internal::SystemPageShift;
using ::partition_alloc::internal::SystemPageSize;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
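A tiny sketch relating the shift/mask helpers above, assuming the common 4 kB system page; the concrete numbers in the comments are illustrative:
void SystemPageMaskExample() {
  using namespace ::partition_alloc::internal;
  const uintptr_t address = 0x12345;
  // With 4 kB pages, SystemPageOffsetMask() is 0xFFF and SystemPageBaseMask()
  // clears those low bits.
  const uintptr_t page_base = address & SystemPageBaseMask();         // 0x12000
  const uintptr_t offset_in_page = address & SystemPageOffsetMask();  // 0x345
  (void)page_base;
  (void)offset_in_page;
}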

View File

@ -0,0 +1,22 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator.h"
namespace partition_alloc::internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

View File

@ -0,0 +1,238 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file implements memory allocation primitives for PageAllocator using
// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
// object that corresponds to a set of memory pages. VMO pages may be mapped
// to an address space. The code below creates a VMO for each memory
// allocation and maps it into the default address space of the current
// process.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/fuchsia/fuchsia_logging.h"
namespace partition_alloc::internal {
namespace {
// Returns VMO name for a PageTag.
const char* PageTagToName(PageTag tag) {
switch (tag) {
case PageTag::kBlinkGC:
return "cr_blink_gc";
case PageTag::kPartitionAlloc:
return "cr_partition_alloc";
case PageTag::kChromium:
return "cr_chromium";
case PageTag::kV8:
return "cr_v8";
default:
PA_DCHECK(false);
return "";
}
}
zx_vm_option_t PageAccessibilityToZxVmOptions(
PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
case PageAccessibilityConfiguration::kRead:
return ZX_VM_PERM_READ;
case PageAccessibilityConfiguration::kReadWrite:
case PageAccessibilityConfiguration::kReadWriteTagged:
return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
case PageAccessibilityConfiguration::kReadExecuteProtected:
case PageAccessibilityConfiguration::kReadExecute:
return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
case PageAccessibilityConfiguration::kReadWriteExecute:
return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
default:
PA_NOTREACHED();
[[fallthrough]];
case PageAccessibilityConfiguration::kInaccessible:
return 0;
}
}
} // namespace
// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{0};
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
zx::vmo vmo;
zx_status_t status = zx::vmo::create(length, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(INFO, status) << "zx_vmo_create";
return 0;
}
const char* vmo_name = PageTagToName(page_tag);
status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));
// VMO names are used only for debugging, so failure to set a name is not
// fatal.
ZX_DCHECK(status == ZX_OK, status);
if (page_tag == PageTag::kV8) {
// V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
// in the new VMO.
status = vmo.replace_as_executable(zx::resource(), &vmo);
if (status != ZX_OK) {
ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
return 0;
}
}
zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);
uint64_t vmar_offset = 0;
if (hint) {
vmar_offset = hint;
options |= ZX_VM_SPECIFIC;
}
uint64_t address;
status =
zx::vmar::root_self()->map(options, vmar_offset, vmo,
/*vmo_offset=*/0, length, &address);
if (status != ZX_OK) {
// map() is expected to fail if |hint| is set to an already-in-use location.
if (!hint) {
ZX_DLOG(ERROR, status) << "zx_vmar_map";
}
return 0;
}
return address;
}
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
// Unmap head if necessary.
if (pre_slack) {
zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
ZX_CHECK(status == ZX_OK, status);
}
// Unmap tail if necessary.
if (post_slack) {
zx_status_t status = zx::vmar::root_self()->unmap(
base_address + pre_slack + trim_length, post_slack);
ZX_CHECK(status == ZX_OK, status);
}
return base_address + pre_slack;
}
bool TrySetSystemPagesAccessInternal(
uint64_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
zx_status_t status = zx::vmar::root_self()->protect(
PageAccessibilityToZxVmOptions(accessibility), address, length);
return status == ZX_OK;
}
void SetSystemPagesAccessInternal(
uint64_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
zx_status_t status = zx::vmar::root_self()->protect(
PageAccessibilityToZxVmOptions(accessibility), address, length);
ZX_CHECK(status == ZX_OK, status);
}
void FreePagesInternal(uint64_t address, size_t length) {
zx_status_t status = zx::vmar::root_self()->unmap(address, length);
ZX_CHECK(status == ZX_OK, status);
}
void DiscardSystemPagesInternal(uint64_t address, size_t length) {
// TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
// forcibly de-committing them immediately, when Fuchsia supports it.
zx_status_t status = zx::vmar::root_self()->op_range(
ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
ZX_CHECK(status == ZX_OK, status);
}
void DecommitSystemPagesInternal(
uint64_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration::kInaccessible);
}
// TODO(https://crbug.com/1022062): Review whether this implementation is
// still appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
// discardable API.
DiscardSystemPagesInternal(address, length);
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration::kInaccessible);
// TODO(https://crbug.com/1022062): this implementation will likely no longer
// be appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
// discardable API.
DiscardSystemPagesInternal(address, length);
}
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On Fuchsia systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length, accessibility);
}
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On Fuchsia systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
return TrySetSystemPagesAccess(address, length, accessibility);
}
return true;
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

View File

@ -0,0 +1,46 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/cpu.h"
#include <sys/mman.h>
// PA_PROT_BTI requests a page that supports BTI landing pads.
#define PA_PROT_BTI 0x10
// PA_PROT_MTE requests a page that's suitable for memory tagging.
#define PA_PROT_MTE 0x20
namespace partition_alloc::internal {
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
case PageAccessibilityConfiguration::kRead:
return PROT_READ;
case PageAccessibilityConfiguration::kReadWriteTagged:
#if defined(ARCH_CPU_ARM64)
return PROT_READ | PROT_WRITE |
(base::CPU::GetInstanceNoAllocation().has_mte() ? PA_PROT_MTE : 0);
#else
[[fallthrough]];
#endif
case PageAccessibilityConfiguration::kReadWrite:
return PROT_READ | PROT_WRITE;
case PageAccessibilityConfiguration::kReadExecuteProtected:
return PROT_READ | PROT_EXEC |
(base::CPU::GetInstanceNoAllocation().has_bti() ? PA_PROT_BTI : 0);
case PageAccessibilityConfiguration::kReadExecute:
return PROT_READ | PROT_EXEC;
case PageAccessibilityConfiguration::kReadWriteExecute:
return PROT_READ | PROT_WRITE | PROT_EXEC;
default:
PA_NOTREACHED();
[[fallthrough]];
case PageAccessibilityConfiguration::kInaccessible:
return PROT_NONE;
}
}
} // namespace partition_alloc::internal

View File

@ -0,0 +1,375 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <sys/mman.h>
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/dcheck_is_on.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_cftyperef.h"
#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if BUILDFLAG(IS_ANDROID)
#include <sys/prctl.h>
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <sys/resource.h>
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#if BUILDFLAG(IS_MAC)
// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task)
#if __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12
// When redeclaring something previously declared as unavailable, the
// weak_import attribute won't be applied unless manually set.
__attribute__((weak_import))
#endif // DT < 10.12
API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop
#endif // BUILDFLAG(IS_MAC)
namespace partition_alloc::internal {
namespace {
#if BUILDFLAG(IS_ANDROID)
const char* PageTagToName(PageTag tag) {
// Important: All the names should be string literals. As per prctl.h in
// //third_party/android_ndk the kernel keeps a pointer to the name instead
// of copying it.
//
// Having the name in .rodata ensures that the pointer remains valid as
// long as the mapping is alive.
switch (tag) {
case PageTag::kBlinkGC:
return "blink_gc";
case PageTag::kPartitionAlloc:
return "partition_alloc";
case PageTag::kChromium:
return "chromium";
case PageTag::kV8:
return "v8";
default:
PA_DCHECK(false);
return "";
}
}
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_MAC)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
if (!base::mac::IsAtLeastOS10_14()) {
// MAP_JIT existed before macOS 10.14, but had somewhat different semantics.
// Only one MAP_JIT region was permitted per process, but calling code here
// will very likely require more than one such region. Since MAP_JIT is not
// strictly necessary to write code to a region and then execute it on these
// older OSes, don't use it at all.
return false;
}
// Until determining that the hardened runtime is enabled, early returns will
// return true, so that MAP_JIT will be used. This is important on arm64,
// which only allows pages to be simultaneously writable and executable when
// in a region allocated with MAP_JIT, regardless of code signing options. On
// arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
// executable fails with EPERM. Although this is not enforced on x86_64,
// MAP_JIT is harmless in that case.
base::ScopedCFTypeRef<SecTaskRef> task(
SecTaskCreateFromSelf(kCFAllocatorDefault));
if (!task) {
return true;
}
uint32_t flags = SecTaskGetCodeSignStatus(task);
if (!(flags & kSecCodeSignatureRuntime)) {
// The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
// == CS_RUNTIME.
return true;
}
// The hardened runtime is enabled. From this point on, early returns must
// return false, indicating that MAP_JIT is not to be used. It's an error
// (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
// entitlement is specified.
base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement)
return false;
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
}
#endif // BUILDFLAG(IS_MAC)
} // namespace
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
std::atomic<int32_t> s_allocPageErrorCode{0};
int GetAccessFlags(PageAccessibilityConfiguration accessibility);
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
#if BUILDFLAG(IS_APPLE)
// Use a custom tag to make it easier to distinguish Partition Alloc regions
// in vmmap(1). Tags between 240-255 are supported.
PA_DCHECK(PageTag::kFirst <= page_tag);
PA_DCHECK(PageTag::kLast >= page_tag);
int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
int fd = -1;
#endif
int access_flag = GetAccessFlags(accessibility);
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if BUILDFLAG(IS_MAC)
// On macOS 10.14 and higher, executables that are code signed with the
// "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit"
// code signing entitlement and allocating the region with the MAP_JIT flag.
static const bool kUseMapJit = UseMapJit();
if (page_tag == PageTag::kV8 && kUseMapJit) {
map_flags |= MAP_JIT;
}
#endif
void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
map_flags, fd, 0);
if (ret == MAP_FAILED) {
s_allocPageErrorCode = errno;
ret = nullptr;
}
#if BUILDFLAG(IS_ANDROID)
// On Android, anonymous mappings can have a name attached to them. This is
// useful for debugging, and double-checking memory attribution.
if (ret) {
// No error checking on purpose, testing only.
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
PageTagToName(page_tag));
}
#endif
return reinterpret_cast<uintptr_t>(ret);
}
bool TrySetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
return 0 == HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility)));
}
void SetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
int access_flags = GetAccessFlags(accessibility);
const int ret = HANDLE_EINTR(
mprotect(reinterpret_cast<void*>(address), length, access_flags));
// On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
// kernel data structures cannot be allocated, (2) the address range is
// invalid, or (3) this would split an existing mapping in a way that would
// exceed the maximum number of allowed mappings.
//
// None of these is very likely, but we still get a lot of crashes here. This
// is because setrlimit(RLIMIT_DATA)'s limit is checked and enforced here if
// the access flags match a "data" mapping, which in our case means
// MAP_PRIVATE | MAP_ANONYMOUS with PROT_WRITE. See the call to
// may_expand_vm() in mm/mprotect.c in the kernel for details.
//
// In this case, we are almost certainly bumping into the sandbox limit, mark
// the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
OOM_CRASH(length);
PA_PCHECK(0 == ret);
}
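// Illustrative sketch only, not part of the allocator: the RLIMIT_DATA
// interaction described above can be reproduced on a recent Linux kernel
// (where RLIMIT_DATA covers anonymous private writable mappings) by reserving
// a PROT_NONE region larger than the limit and then asking for PROT_WRITE.
// The limit and sizes below are hypothetical.
//
//   #include <sys/mman.h>
//   #include <sys/resource.h>
//
//   struct rlimit limit = {64 * 1024 * 1024, 64 * 1024 * 1024};
//   setrlimit(RLIMIT_DATA, &limit);
//   // Reserving PROT_NONE memory does not count as a "data" mapping.
//   void* region = mmap(nullptr, 128 * 1024 * 1024, PROT_NONE,
//                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
//   // Making it writable turns it into a data mapping that exceeds
//   // RLIMIT_DATA, so this fails with ENOMEM (see may_expand_vm()).
//   int rc = mprotect(region, 128 * 1024 * 1024, PROT_READ | PROT_WRITE);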
void FreePagesInternal(uintptr_t address, size_t length) {
PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
}
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
uintptr_t ret = base_address;
// We can resize the allocation run. Release unneeded memory before and after
// the aligned range.
if (pre_slack) {
FreePages(base_address, pre_slack);
ret = base_address + pre_slack;
}
if (post_slack) {
FreePages(ret + trim_length, post_slack);
}
return ret;
}
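// Worked example of the trimming above (addresses are hypothetical): with
// base_address = 0x10000, base_length = 0x5000, pre_slack = 0x1000,
// trim_length = 0x3000 and post_slack = 0x1000, the ranges [0x10000, 0x11000)
// and [0x14000, 0x15000) are released, and 0x11000 is returned as the start
// of the retained, aligned [0x11000, 0x14000) mapping.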
void DecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
// In POSIX, there is no decommit concept. Discarding is an effective way of
// implementing the Windows semantics where the OS is allowed to not swap the
// pages in the region.
DiscardSystemPages(address, length);
bool change_permissions =
accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
#if DCHECK_IS_ON()
// Zeroing is not guaranteed here, so make that impossible to miss.
//
// More specifically, several callers have had issues with assuming that
// memory is zeroed; this should hopefully make such bugs more visible. We
// don't memset() everything, because ranges can be very large, and doing it
// over the entire range could make Chrome unusable with DCHECK_IS_ON().
//
// Only do it when we are about to change the permissions, since we don't know
// the previous permissions, and cannot restore them.
if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
// Memory may not be writable.
size_t size = std::min(length, 2 * SystemPageSize());
void* ptr = reinterpret_cast<void*>(address);
PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
memset(ptr, 0xcc, size);
}
#endif
// Make pages inaccessible, unless the caller requested to keep permissions.
//
// Note, there is a small window between these calls when the pages can be
// incorrectly touched and brought back to memory. Not ideal, but doing those
// operations in the opposite order resulted in PMF regression on Mac (see
// crbug.com/1153021).
if (change_permissions) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration::kInaccessible);
}
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
// a MAP_FIXED request is successful, then any previous mappings [...] for
// those whole pages containing any part of the address range [pa,pa+len)
// shall be removed, as if by an appropriate call to munmap(), before the
// new mapping is established." As a consequence, the memory will be
// zero-initialized on next access.
void* ptr = reinterpret_cast<void*>(address);
void* ret = mmap(ptr, length, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
PA_CHECK(ptr == ret);
}
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On POSIX systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length, accessibility);
}
#if BUILDFLAG(IS_APPLE)
// On macOS, to update accounting, we need to make another syscall. For more
// details, see https://crbug.com/823915.
madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// On POSIX systems, the caller needs to simply read the memory to recommit
// it. However, if decommit changed the permissions, recommit has to change
// them back.
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
bool ok = TrySetSystemPagesAccess(address, length, accessibility);
if (!ok)
return false;
}
#if BUILDFLAG(IS_APPLE)
// On macOS, to update accounting, we need to make another syscall. For more
// details, see https://crbug.com/823915.
madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif
return true;
}
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE)
int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
if (ret) {
// MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
ret = madvise(ptr, length, MADV_DONTNEED);
}
PA_PCHECK(ret == 0);
#else
// We have experimented with other flags, but with suboptimal results.
//
// MADV_FREE (Linux): Makes our memory measurements less predictable;
// performance benefits unclear.
//
// Therefore, we just do the simple thing: MADV_DONTNEED.
PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
#endif
}
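// Usage note derived from the functions above (no additional behavior): on
// macOS, decommit and recommit form a pair. DecommitSystemPagesInternal()
// discards pages via MADV_FREE_REUSABLE (through DiscardSystemPages()), and
// the MADV_FREE_REUSE calls in the recommit paths restore the OS accounting
// once the range is reused, e.g.:
//
//   DecommitSystemPagesInternal(
//       address, length, PageAccessibilityDisposition::kRequireUpdate);
//   // ... later, when the range is needed again ...
//   RecommitSystemPagesInternal(
//       address, length, accessibility,
//       PageAccessibilityDisposition::kRequireUpdate);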
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

View File

@ -0,0 +1,199 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#include <versionhelpers.h>
#include <cstdint>
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal {
namespace {
// On Windows, discarded pages are not returned to the system immediately and
// are not guaranteed to be zeroed when returned to the application.
using DiscardVirtualMemoryFunction = DWORD(WINAPI*)(PVOID virtualAddress,
SIZE_T size);
DiscardVirtualMemoryFunction s_discard_virtual_memory =
reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
} // namespace
// |VirtualAlloc| will fail if allocation at the hint address is blocked.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
case PageAccessibilityConfiguration::kRead:
return PAGE_READONLY;
case PageAccessibilityConfiguration::kReadWrite:
case PageAccessibilityConfiguration::kReadWriteTagged:
return PAGE_READWRITE;
case PageAccessibilityConfiguration::kReadExecute:
case PageAccessibilityConfiguration::kReadExecuteProtected:
return PAGE_EXECUTE_READ;
case PageAccessibilityConfiguration::kReadWriteExecute:
return PAGE_EXECUTE_READWRITE;
default:
PA_NOTREACHED();
[[fallthrough]];
case PageAccessibilityConfiguration::kInaccessible:
return PAGE_NOACCESS;
}
}
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
DWORD access_flag = GetAccessFlags(accessibility);
const DWORD type_flags =
(accessibility != PageAccessibilityConfiguration::kInaccessible)
? (MEM_RESERVE | MEM_COMMIT)
: MEM_RESERVE;
void* ret = VirtualAlloc(reinterpret_cast<void*>(hint), length, type_flags,
access_flag);
if (ret == nullptr) {
s_allocPageErrorCode = GetLastError();
}
return reinterpret_cast<uintptr_t>(ret);
}
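// Illustrative sketch, not part of the allocator: with the flags above,
// requesting an inaccessible region only reserves address space; committing
// happens later through the accessibility setters below, e.g.:
//
//   uintptr_t base = SystemAllocPagesInternal(
//       0, length, PageAccessibilityConfiguration::kInaccessible,
//       PageTag::kChromium);  // MEM_RESERVE only.
//   TrySetSystemPagesAccessInternal(
//       base, length,
//       PageAccessibilityConfiguration::kReadWrite);  // MEM_COMMIT.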
uintptr_t TrimMappingInternal(uintptr_t base_address,
size_t base_length,
size_t trim_length,
PageAccessibilityConfiguration accessibility,
size_t pre_slack,
size_t post_slack) {
uintptr_t ret = base_address;
if (pre_slack || post_slack) {
// We cannot resize the allocation run. Free it and retry at the aligned
// address within the freed range.
ret = base_address + pre_slack;
FreePages(base_address, base_length);
ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
}
return ret;
}
bool TrySetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility == PageAccessibilityConfiguration::kInaccessible)
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
return nullptr !=
VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility));
}
void SetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility == PageAccessibilityConfiguration::kInaccessible) {
if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
}
} else {
if (!VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility))) {
int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT)
OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
PA_CHECK(ERROR_SUCCESS == error);
}
}
}
void FreePagesInternal(uintptr_t address, size_t length) {
PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
}
void DecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration::kInaccessible);
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
// "If a page is decommitted but not released, its state changes to reserved.
// Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
// release it. Attempts to read from or write to a reserved page results in an
// access violation exception."
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
// for MEM_COMMIT: "The function also guarantees that when the caller later
// initially accesses the memory, the contents will be zero."
PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
}
void RecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
SetSystemPagesAccess(address, length, accessibility);
}
bool TryRecommitSystemPagesInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageAccessibilityDisposition accessibility_disposition) {
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
return TrySetSystemPagesAccess(address, length, accessibility);
}
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
if (s_discard_virtual_memory ==
reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) {
// DiscardVirtualMemory's minimum supported client is Windows 8.1 Update.
// So skip GetProcAddress("DiscardVirtualMemory") if the Windows version is
// older than Windows 8.1.
if (IsWindows8Point1OrGreater()) {
s_discard_virtual_memory =
reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
} else {
s_discard_virtual_memory = nullptr;
}
}
void* ptr = reinterpret_cast<void*>(address);
// Use DiscardVirtualMemory when available because it releases faster than
// MEM_RESET.
DWORD ret = 1;
if (s_discard_virtual_memory) {
ret = s_discard_virtual_memory(ptr, length);
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
if (ret) {
PA_CHECK(VirtualAlloc(ptr, length, MEM_RESET, PAGE_READWRITE));
}
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_

View File

@ -0,0 +1,189 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_address_space.h"
#include <array>
#include <cstdint>
#include <ostream>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif
namespace partition_alloc::internal {
#if defined(PA_HAS_64_BITS_POINTERS)
namespace {
#if BUILDFLAG(IS_WIN)
NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
NO_CODE_FOLDING();
PA_CHECK(false);
}
NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
NO_CODE_FOLDING();
PA_CHECK(false);
}
#endif // BUILDFLAG(IS_WIN)
NOINLINE void HandleGigaCageAllocFailure() {
NO_CODE_FOLDING();
uint32_t alloc_page_error_code = base::GetAllocPageErrorCode();
PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
// It's important to easily differentiate these two failures on Windows, so
// crash with different stacks.
#if BUILDFLAG(IS_WIN)
if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
// The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
// it must be VA space exhaustion.
HandleGigaCageAllocFailureOutOfVASpace();
} else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
// On Windows <8.1, MEM_RESERVE increases commit charge to account for
// not-yet-committed PTEs needed to cover that VA space, if it were to be
// committed (see crbug.com/1101421#c16).
HandleGigaCageAllocFailureOutOfCommitCharge();
} else
#endif // BUILDFLAG(IS_WIN)
{
PA_CHECK(false);
}
}
} // namespace
alignas(kPartitionCachelineSize)
PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
return kRegularPoolSize;
}
ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return kBRPPoolSize;
}
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
void PartitionAddressSpace::Init() {
if (IsInitialized())
return;
setup_.regular_pool_base_address_ = AllocPages(
RegularPoolSize(), RegularPoolSize(),
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
if (!setup_.regular_pool_base_address_)
HandleGigaCageAllocFailure();
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
setup_.regular_pool_base_mask_ = ~(RegularPoolSize() - 1) & kMemTagUnmask;
#endif
PA_DCHECK(!(setup_.regular_pool_base_address_ & (RegularPoolSize() - 1)));
setup_.regular_pool_ = AddressPoolManager::GetInstance()->Add(
setup_.regular_pool_base_address_, RegularPoolSize());
PA_CHECK(setup_.regular_pool_ == kRegularPoolHandle);
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
RegularPoolSize() - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + RegularPoolSize()));
// Reserve an extra allocation granularity unit before the BRP pool, but keep
// the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
// is a valid pointer, and having a "forbidden zone" before the BRP pool
// prevents such a pointer from "sneaking into" the pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
uintptr_t base_address = AllocPagesWithAlignOffset(
0, BRPPoolSize() + kForbiddenZoneSize, BRPPoolSize(),
BRPPoolSize() - kForbiddenZoneSize,
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
if (!base_address)
HandleGigaCageAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
setup_.brp_pool_base_mask_ = ~(BRPPoolSize() - 1) & kMemTagUnmask;
#endif
PA_DCHECK(!(setup_.brp_pool_base_address_ & (BRPPoolSize() - 1)));
setup_.brp_pool_ = AddressPoolManager::GetInstance()->Add(
setup_.brp_pool_base_address_, BRPPoolSize());
PA_CHECK(setup_.brp_pool_ == kBRPPoolHandle);
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + BRPPoolSize() - 1));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + BRPPoolSize()));
#if PA_STARSCAN_USE_CARD_TABLE
// Reserve memory for PCScan quarantine card table.
uintptr_t requested_address = setup_.regular_pool_base_address_;
uintptr_t actual_address = AddressPoolManager::GetInstance()->Reserve(
setup_.regular_pool_, requested_address, kSuperPageSize);
PA_CHECK(requested_address == actual_address)
<< "QuarantineCardTable is required to be allocated at the beginning of "
"the regular pool";
#endif // PA_STARSCAN_USE_CARD_TABLE
}
void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
size_t size) {
// The ConfigurablePool must only be initialized once.
PA_CHECK(!IsConfigurablePoolInitialized());
// The other Pools must be initialized first.
Init();
PA_CHECK(pool_base);
PA_CHECK(size <= kConfigurablePoolMaxSize);
PA_CHECK(size >= kConfigurablePoolMinSize);
PA_CHECK(base::bits::IsPowerOfTwo(size));
PA_CHECK(pool_base % size == 0);
setup_.configurable_pool_base_address_ = pool_base;
setup_.configurable_pool_base_mask_ = ~(size - 1);
setup_.configurable_pool_ = AddressPoolManager::GetInstance()->Add(
setup_.configurable_pool_base_address_, size);
PA_CHECK(setup_.configurable_pool_ == kConfigurablePoolHandle);
}
void PartitionAddressSpace::UninitForTesting() {
FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
// For BRP pool, the allocation region includes a "forbidden zone" before the
// pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
BRPPoolSize() + kForbiddenZoneSize);
// Do not free pages for the configurable pool, because its memory is owned
// by someone else, but deinitialize it nonetheless.
setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
setup_.regular_pool_ = 0;
setup_.brp_pool_ = 0;
setup_.configurable_pool_ = 0;
AddressPoolManager::GetInstance()->ResetForTesting();
}
void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
AddressPoolManager::GetInstance()->Remove(setup_.configurable_pool_);
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
setup_.configurable_pool_ = 0;
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
} // namespace partition_alloc::internal

View File

@ -0,0 +1,360 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#include <algorithm>
#include <array>
#include <limits>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#include "build/buildflag.h"
// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)
namespace partition_alloc {
namespace internal {
// Reserves address space for PartitionAllocator.
class BASE_EXPORT PartitionAddressSpace {
public:
// BRP stands for BackupRefPtr. GigaCage is split into pools, one of which
// supports BackupRefPtr and one that doesn't.
static ALWAYS_INLINE internal::pool_handle GetRegularPool() {
return setup_.regular_pool_;
}
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
static ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_;
}
#else
static ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
return kRegularPoolBaseMask;
}
#endif
static ALWAYS_INLINE internal::pool_handle GetBRPPool() {
return setup_.brp_pool_;
}
// The Configurable Pool can be created inside an existing mapping and so will
// be located outside PartitionAlloc's GigaCage.
static ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
return setup_.configurable_pool_;
}
static ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
address = ::partition_alloc::internal::UnmaskPtr(address);
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
PA_DCHECK(!IsInBRPPool(address));
#endif
pool_handle pool = 0;
uintptr_t base = 0;
if (IsInRegularPool(address)) {
pool = GetRegularPool();
base = setup_.regular_pool_base_address_;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
} else if (IsInBRPPool(address)) {
pool = GetBRPPool();
base = setup_.brp_pool_base_address_;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
} else if (IsInConfigurablePool(address)) {
pool = GetConfigurablePool();
base = setup_.configurable_pool_base_address_;
} else {
PA_NOTREACHED();
}
return std::make_pair(pool, address - base);
}
static ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
return kConfigurablePoolMaxSize;
}
static ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
return kConfigurablePoolMinSize;
}
// Initialize the GigaCage and the Pools inside of it.
// This function must only be called from the main thread.
static void Init();
// Initialize the ConfigurablePool at the given address |pool_base|. It must
// be aligned to the size of the pool. The size must be a power of two and
// must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()]. This
// function must only be called from the main thread.
static void InitConfigurablePool(uintptr_t pool_base, size_t size);
static void UninitForTesting();
static void UninitConfigurablePoolForTesting();
static ALWAYS_INLINE bool IsInitialized() {
// The regular and BRP pools are either both initialized or neither is. The
// configurable pool is initialized separately.
if (setup_.regular_pool_) {
PA_DCHECK(setup_.brp_pool_ != 0);
return true;
}
PA_DCHECK(setup_.brp_pool_ == 0);
return false;
}
static ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
return setup_.configurable_pool_base_address_ !=
kUninitializedPoolBaseAddress;
}
// Returns false for nullptr.
static ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
return (address & regular_pool_base_mask) ==
setup_.regular_pool_base_address_;
}
static ALWAYS_INLINE uintptr_t RegularPoolBase() {
return setup_.regular_pool_base_address_;
}
// Returns false for nullptr.
static ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
}
// Returns false for nullptr.
static ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
return (address & setup_.configurable_pool_base_mask_) ==
setup_.configurable_pool_base_address_;
}
static ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
return setup_.configurable_pool_base_address_;
}
static ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return ::partition_alloc::internal::UnmaskPtr(address) -
setup_.brp_pool_base_address_;
}
// PartitionAddressSpace is a static-only class.
PartitionAddressSpace() = delete;
PartitionAddressSpace(const PartitionAddressSpace&) = delete;
void* operator new(size_t) = delete;
void* operator new(size_t, void*) = delete;
private:
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
static ALWAYS_INLINE size_t RegularPoolSize();
static ALWAYS_INLINE size_t BRPPoolSize();
#else
// The pool sizes should be as large as the maximum whenever possible.
constexpr static ALWAYS_INLINE size_t RegularPoolSize() {
return kRegularPoolSize;
}
constexpr static ALWAYS_INLINE size_t BRPPoolSize() { return kBRPPoolSize; }
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
// On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool is
// where all allocations have a BRP ref-count, so pointers into it can use BRP
// protection against use-after-free. Allocations in the other pools don't
// have that.
//
// Pool sizes have to be a power of two. Each pool will be aligned at its
// own size boundary.
//
// NOTE! The BRP pool must be preceded by a reserved region, where allocations
// are forbidden. This is to prevent a pointer immediately past a non-GigaCage
// allocation from falling into the BRP pool, thus triggering BRP mechanism
// and likely crashing. This "forbidden zone" can be as small as 1B, but it's
// simpler to just reserve an allocation granularity unit.
//
// The ConfigurablePool is an optional Pool that can be created inside an
// existing mapping by the embedder, and so will be outside of the GigaCage.
// This Pool can be used when certain PA allocations must be located inside a
// given virtual address region. One use case for this Pool is V8's virtual
// memory cage, which requires that ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
base::bits::IsPowerOfTwo(kBRPPoolSize));
#if BUILDFLAG(IS_IOS)
// TODO(crbug.com/1250788): Remove the iOS special case.
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = kPoolMaxSize;
#else
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
#endif
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize) &&
base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
// Masks used to easily determine whether an address belongs to a pool.
// On Arm, the top byte of each pointer is ignored (meaning there are
// effectively 256 versions of each valid pointer). 4 bits are used to store
// tags for Arm's Memory Tagging Extension (MTE). To ensure that tagged
// pointers are recognized as being in the pool, mask off the top byte with
// kMemTagUnmask.
static constexpr uintptr_t kRegularPoolOffsetMask =
static_cast<uintptr_t>(kRegularPoolSize) - 1;
static constexpr uintptr_t kRegularPoolBaseMask =
~kRegularPoolOffsetMask & kMemTagUnmask;
static constexpr uintptr_t kBRPPoolOffsetMask =
static_cast<uintptr_t>(kBRPPoolSize) - 1;
static constexpr uintptr_t kBRPPoolBaseMask =
~kBRPPoolOffsetMask & kMemTagUnmask;
#endif // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
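// Worked example for the masks above, assuming a hypothetical 16 GiB
// (1ULL << 34) pool size and ignoring the MTE bits removed by kMemTagUnmask:
// the offset mask is 0x3'FFFF'FFFF and the base mask keeps the remaining high
// bits. For a regular pool based at 0x20'0000'0000, the address
// 0x20'1234'5678 satisfies
//   (0x20'1234'5678 & ~0x3'FFFF'FFFF) == 0x20'0000'0000,
// so IsInRegularPool() returns true, whereas 0x24'0000'0000 (one byte past
// the end of the pool) does not match the base and is rejected.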
// This must be set to such a value that IsIn*Pool() always returns false when
// the pool isn't initialized.
static constexpr uintptr_t kUninitializedPoolBaseAddress =
static_cast<uintptr_t>(-1);
struct GigaCageSetup {
// Before PartitionAddressSpace::Init(), no allocations have been made from a
// reserved address space. Therefore, set *_pool_base_address_ initially to
// -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
constexpr GigaCageSetup()
: regular_pool_base_address_(kUninitializedPoolBaseAddress),
brp_pool_base_address_(kUninitializedPoolBaseAddress),
configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
regular_pool_base_mask_(0),
brp_pool_base_mask_(0),
#endif
configurable_pool_base_mask_(0),
regular_pool_(0),
brp_pool_(0),
configurable_pool_(0) {
}
// Using a union to enforce padding.
union {
struct {
uintptr_t regular_pool_base_address_;
uintptr_t brp_pool_base_address_;
uintptr_t configurable_pool_base_address_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
uintptr_t regular_pool_base_mask_;
uintptr_t brp_pool_base_mask_;
#endif
uintptr_t configurable_pool_base_mask_;
pool_handle regular_pool_;
pool_handle brp_pool_;
pool_handle configurable_pool_;
};
char one_cacheline_[kPartitionCachelineSize];
};
};
static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
"GigaCageSetup has to fill a cacheline(s)");
// See the comment describing the address layout above.
//
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writeable data, through
// alignment and padding.
alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
};
ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
return PartitionAddressSpace::GetPoolAndOffset(address);
}
ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
return std::get<0>(GetPoolAndOffset(address));
}
ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
return PartitionAddressSpace::OffsetInBRPPool(address);
}
} // namespace internal
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
return internal::PartitionAddressSpace::IsInRegularPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|| internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
|| internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInRegularPool(address);
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInBRPPool(address);
}
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
uintptr_t address) {
return internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
}
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::IsConfigurablePoolAvailable;
using ::partition_alloc::IsManagedByPartitionAlloc;
using ::partition_alloc::IsManagedByPartitionAllocBRPPool;
using ::partition_alloc::IsManagedByPartitionAllocConfigurablePool;
using ::partition_alloc::IsManagedByPartitionAllocRegularPool;
namespace internal {
using ::partition_alloc::internal::GetPool;
using ::partition_alloc::internal::GetPoolAndOffset;
using ::partition_alloc::internal::OffsetInBRPPool;
using ::partition_alloc::internal::PartitionAddressSpace;
} // namespace internal
} // namespace base
#endif // defined(PA_HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

View File

@ -0,0 +1,64 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#include <cstring>
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"
// Prefetch *x into memory.
#if defined(__clang__) || defined(COMPILER_GCC)
#define PA_PREFETCH(x) __builtin_prefetch(x)
#else
#define PA_PREFETCH(x)
#endif
namespace partition_alloc::internal {
// This is a `memset` that resists being optimized away. Adapted from
// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
// depend on //third_party, and this is small enough.)
ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
memset(ptr, value, size);
// As best as we can tell, this is sufficient to break any optimisations that
// might try to eliminate "superfluous" memsets. If there's an easy way to
// detect memset_s, it would be better to use that.
__asm__ __volatile__("" : : "r"(ptr) : "memory");
}
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics.
#if !DCHECK_IS_ON()
ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (UNLIKELY(counter == 0)) {
// It's OK to truncate this value.
counter = static_cast<uint8_t>(::partition_alloc::internal::RandomValue());
}
// If `counter` is 0, this will wrap. That is intentional and OK.
counter--;
return counter == 0;
}
#endif // !DCHECK_IS_ON()
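// Hypothetical usage sketch (the real call site lives in the allocator's
// Free() path, which is not part of this file; `object` and `slot_size` are
// placeholders): under PA_ZERO_RANDOMLY_ON_FREE, a freed slot is scrubbed
// roughly once per random-length period:
//
//   if (UNLIKELY(RandomPeriod()))
//     SecureMemset(object, 0, slot_size);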
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::SecureMemset;
#if !DCHECK_IS_ON()
using ::partition_alloc::internal::RandomPeriod;
#endif // !DCHECK_IS_ON()
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

View File

@ -0,0 +1,113 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc.h"
#include <string.h>
#include <cstdint>
#include <memory>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/dcheck_is_on.h"
namespace base {
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
// This is from page_allocator_constants.h and doesn't really fit here, but
// there isn't a centralized initialization function in page_allocator.cc, so
// there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
STATIC_ASSERT_OR_PA_CHECK((SystemPageSize() & SystemPageOffsetMask()) == 0,
"SystemPageSize() must be power of 2");
// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() * 4 <= kSuperPageSize,
"ok super page size");
STATIC_ASSERT_OR_PA_CHECK((kSuperPageSize & SystemPageOffsetMask()) == 0,
"ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
STATIC_ASSERT_OR_PA_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
"ok partition page size");
STATIC_ASSERT_OR_PA_CHECK((PartitionPageSize() & SystemPageOffsetMask()) == 0,
"ok partition page multiple");
static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
kPageMetadataSize,
"PartitionPage should not be too big");
STATIC_ASSERT_OR_PA_CHECK(
kPageMetadataSize * NumPartitionPagesPerSuperPage() <= SystemPageSize(),
"page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
STATIC_ASSERT_OR_PA_CHECK(
MaxDirectMapped() <= (1UL << 31) + DirectMapAllocationGranularity(),
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(kSmallestBucket == kAlignment, "generic smallest bucket");
static_assert(kMaxBucketed == 917504, "generic max bucketed");
STATIC_ASSERT_OR_PA_CHECK(
MaxSystemPagesPerRegularSlotSpan() <= 16,
"System pages per slot span must be no greater than 16.");
PA_DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory;
}
void PartitionAllocGlobalUninitForTesting() {
internal::PCScan::UninitForTesting(); // IN-TEST
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if defined(PA_HAS_64_BITS_POINTERS)
internal::PartitionAddressSpace::UninitForTesting();
#else
internal::AddressPoolManager::GetInstance()->ResetForTesting();
#endif // defined(PA_HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
internal::g_oom_handling_function = nullptr;
}
namespace internal {
template <bool thread_safe>
PartitionAllocator<thread_safe>::~PartitionAllocator() {
MemoryReclaimer::Instance()->UnregisterPartition(&partition_root_);
}
template <bool thread_safe>
void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
<< "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
partition_root_.Init(opts);
MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
}
template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);
#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(USE_BACKUP_REF_PTR)
void CheckThatSlotOffsetIsZero(uintptr_t address) {
// Add kPartitionPastAllocationAdjustment, because
// PartitionAllocGetSlotStartInBRPPool will subtract it.
PA_CHECK(PartitionAllocGetSlotStartInBRPPool(
address + kPartitionPastAllocationAdjustment) == address);
}
#endif
} // namespace internal
} // namespace base

View File

@ -0,0 +1,50 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
namespace base {
BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
namespace internal {
template <bool thread_safe>
struct BASE_EXPORT PartitionAllocator {
PartitionAllocator() = default;
~PartitionAllocator();
void init(PartitionOptions);
ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
ALWAYS_INLINE const PartitionRoot<thread_safe>* root() const {
return &partition_root_;
}
private:
PartitionRoot<thread_safe> partition_root_;
};
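// Minimal usage sketch (the option values passed to PartitionOptions are
// elided here; the authoritative list lives in partition_root.h):
//
//   base::PartitionAllocator allocator;
//   allocator.init(PartitionOptions{...});
//   void* object = allocator.root()->Alloc(64, "MyTypeName");
//   allocator.root()->Free(object);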
} // namespace internal
using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
} // namespace base
namespace partition_alloc {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::PartitionAllocator;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_

View File

@ -0,0 +1,152 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#include <cstdint>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/check.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h"
#define PA_STRINGIFY_IMPL(s) #s
#define PA_STRINGIFY(s) PA_STRINGIFY_IMPL(s)
// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. When an assertion is
// triggered, they format strings, leading to reentrancy in the code, which none
// of PartitionAlloc is designed to support (and especially not for error
// paths).
//
// As a consequence:
// - When PartitionAlloc is not malloc(), use the regular macros
// - Otherwise, crash immediately. This provides worse error messages though.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// For official builds, discard log strings to reduce binary bloat.
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
// See base/check.h for implementation details.
#define PA_CHECK(condition) \
UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
#else
// PartitionAlloc uses async-signal-safe RawCheck() for error reporting.
// Async-signal-safe functions are guaranteed to not allocate as otherwise they
// could operate with inconsistent allocator state.
#define PA_CHECK(condition) \
UNLIKELY(!(condition)) \
? logging::RawCheck( \
__FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
: EAT_CHECK_STREAM_PARAMS()
#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG)
#if DCHECK_IS_ON()
#define PA_DCHECK(condition) PA_CHECK(condition)
#else
#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
#endif // DCHECK_IS_ON()
#define PA_PCHECK(condition) \
if (!(condition)) { \
int error = errno; \
base::debug::Alias(&error); \
IMMEDIATE_CRASH(); \
}
#else
#define PA_CHECK(condition) CHECK(condition)
#define PA_DCHECK(condition) DCHECK(condition)
#define PA_PCHECK(condition) PCHECK(condition)
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Expensive dchecks that run within *Scan. These checks are only enabled in
// debug builds with dchecks enabled.
#if !defined(NDEBUG)
#define PA_SCAN_DCHECK_IS_ON() DCHECK_IS_ON()
#else
#define PA_SCAN_DCHECK_IS_ON() 0
#endif
#if PA_SCAN_DCHECK_IS_ON()
#define PA_SCAN_DCHECK(expr) PA_DCHECK(expr)
#else
#define PA_SCAN_DCHECK(expr) EAT_CHECK_STREAM_PARAMS(!(expr))
#endif
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
// Use this macro to assert on things that are conditionally constexpr as
// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time, this
// is a static_assert. Where determined at run time, this is a PA_CHECK.
// Therefore, this macro must only be used where both a static_assert and a
// PA_CHECK would be viable, that is, within a function, and ideally a function
// that executes only once, early in the program, such as during initialization.
#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
static_assert(condition, message)
#else
#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
do { \
PA_CHECK(condition) << (message); \
} while (false)
#endif
namespace partition_alloc::internal {
// Used for PA_DEBUG_DATA_ON_STACK, below.
struct alignas(16) DebugKv {
// A 16-byte object aligned on 16 bytes, to make it easier to see in crash
// reports.
char k[8] = {}; // Not necessarily 0-terminated.
uint64_t v = 0;
DebugKv(const char* key, size_t value) {
// Fill with ' ', so that the stack dump is nicer to read. Not using
// memset() on purpose; this header is included from *many* places.
for (int index = 0; index < 8; index++) {
k[index] = ' ';
}
for (int index = 0; index < 8; index++) {
k[index] = key[index];
if (key[index] == '\0')
break;
}
v = value;
}
};
} // namespace partition_alloc::internal
#define PA_CONCAT(x, y) x##y
#define PA_CONCAT2(x, y) PA_CONCAT(x, y)
#define PA_DEBUG_UNIQUE_NAME PA_CONCAT2(kv, __LINE__)
// Puts a key-value pair on the stack for debugging. `base::debug::Alias()`
// makes sure a local variable is saved on the stack, but the variables can be
// hard to find in crash reports, particularly if the frame pointer is not
// present / invalid.
//
// This puts a key right before the value on the stack. The key has to be a C
// string, which gets truncated if it's longer than 8 characters.
// Example use:
// PA_DEBUG_DATA_ON_STACK("size", 0x42)
//
// Sample output in lldb:
// (lldb) x 0x00007fffffffd0d0 0x00007fffffffd0f0
// 0x7fffffffd0d0: 73 69 7a 65 00 00 00 00 42 00 00 00 00 00 00 00
// size............
//
// With gdb, one can use:
// x/8g <STACK_POINTER>
// to see the data. With lldb, "x <STACK_POINTER> <FRAME_POINTER>" can be used.
#define PA_DEBUG_DATA_ON_STACK(name, value) \
::partition_alloc::internal::DebugKv PA_DEBUG_UNIQUE_NAME{name, value}; \
::base::debug::Alias(&PA_DEBUG_UNIQUE_NAME);
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_

View File

@ -0,0 +1,219 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
#include "base/allocator/buildflags.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h"
// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
// address space. The only known case where address space is 32-bit is NaCl, so
// eliminate it explicitly. static_assert below ensures that others won't slip
// through.
#if defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL)
#define PA_HAS_64_BITS_POINTERS
static_assert(sizeof(void*) == 8, "");
#else
static_assert(sizeof(void*) != 8, "");
#endif
// PCScan supports 64 bits only.
#if defined(PA_HAS_64_BITS_POINTERS)
#define PA_ALLOW_PCSCAN
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && \
(defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_STARSCAN_NEON_SUPPORTED
#endif
#if 0
// Use a dynamically sized GigaCage. This allows querying the size at run
// time, before initialization, instead of using a hardcoded constexpr.
#define PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
#define PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED
#endif
#endif // defined(PA_HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#if defined(PA_HAS_64_BITS_POINTERS)
// Use a card table to avoid races for the PCScan configuration without
// safepoints. The card table provides the guarantee that for a marked card
// the underlying super-page is fully initialized.
#define PA_STARSCAN_USE_CARD_TABLE 1
#else
// The card table is permanently disabled for 32-bit.
#define PA_STARSCAN_USE_CARD_TABLE 0
#endif // defined(PA_HAS_64_BITS_POINTERS)
#if PA_STARSCAN_USE_CARD_TABLE && !defined(PA_ALLOW_PCSCAN)
#error "Card table can only be used when *Scan is allowed"
#endif
// Use batched freeing when sweeping pages. This builds up a freelist in the
// scanner thread and appends to the slot-span's freelist only once.
#define PA_STARSCAN_BATCHED_FREE 1
// POSIX is not only Linux: it also covers macOS and other OSes. We rely on
// Linux-specific features such as futex(2), hence the dedicated define below.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define PA_HAS_LINUX_KERNEL
#endif
// On some platforms, we implement locking by spinning in userspace, then going
// into the kernel only if there is contention. This requires platform support,
// namely:
// - On Linux, futex(2)
// - On Windows, a fast userspace "try" operation which is available
// with SRWLock
// - Otherwise, a fast userspace pthread_mutex_trylock().
//
// On macOS, pthread_mutex_trylock() is fast by default starting with macOS
// 10.14. Chromium targets an earlier version, so it cannot be known at
// compile-time. So we use something different. On other POSIX systems, we
// assume that pthread_mutex_trylock() is suitable.
//
// Otherwise, a userspace spinlock implementation is used.
#if defined(PA_HAS_LINUX_KERNEL) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) || BUILDFLAG(IS_FUCHSIA)
#define PA_HAS_FAST_MUTEX
#endif
// If set to 1, enables zeroing memory on Free() with roughly 1% probability.
// This applies only to normal buckets, as direct-map allocations are always
// decommitted.
// TODO(bartekn): Re-enable once PartitionAlloc-Everywhere evaluation is done.
#if 0
#define PA_ZERO_RANDOMLY_ON_FREE
#endif
// Need TLS support.
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA)
#define PA_THREAD_CACHE_SUPPORTED
#endif
// Too expensive for official builds, as it adds cache misses to all
// allocations. On the other hand, we want wide metrics coverage to get
// realistic profiles.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !defined(OFFICIAL_BUILD)
#define PA_THREAD_CACHE_ALLOC_STATS
#endif
// Optional statistics collection. Lightweight, contrary to the ones above,
// hence enabled by default.
#define PA_THREAD_CACHE_ENABLE_STATISTICS
// Enable free list shadow entry to strengthen hardening as much as possible.
// The shadow entry is an inversion (bitwise-NOT) of the encoded `next` pointer.
//
// Disabled when ref-count is placed in the previous slot, as it will overlap
// with the shadow for the smallest slots.
//
// Disabled on Big Endian CPUs, because encoding is also a bitwise-NOT there,
// making the shadow entry equal to the original, valid pointer to the next
// slot. In case Use-after-Free happens, we'd rather not hand out a valid,
// ready-to-use pointer.
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && \
defined(ARCH_CPU_LITTLE_ENDIAN)
#define PA_HAS_FREELIST_SHADOW_ENTRY
#endif
// Specifies whether allocation extras need to be added.
#if DCHECK_IS_ON() || BUILDFLAG(USE_BACKUP_REF_PTR)
#define PA_EXTRAS_REQUIRED
#endif
// Count and total wall clock time spent in memory related system calls. This
// doesn't cover all system calls, in particular the ones related to locking.
//
// Not enabled by default, as it has a runtime cost, and causes issues with some
// builds (e.g. Windows).
// However the total count is collected on all platforms.
// #define PA_COUNT_SYSCALL_TIME
// On Windows, |thread_local| variables cannot be marked "dllexport", see
// compiler error C2492 at
// https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2492?view=msvc-160.
// Don't use it there.
//
// On macOS and iOS:
// - With PartitionAlloc-Everywhere, thread_local allocates, reentering the
// allocator.
// - Component builds triggered a clang bug: crbug.com/1243375
//
// Regardless, the "normal" TLS access is fast on x86_64 (see partition_tls.h),
// so don't bother with thread_local anywhere.
#if !(BUILDFLAG(IS_WIN) && defined(COMPONENT_BUILD)) && !BUILDFLAG(IS_APPLE)
#define PA_THREAD_LOCAL_TLS
#endif
// When PartitionAlloc is malloc(), detect malloc() becoming re-entrant by
// calling malloc() again.
//
// Limitations:
// - DCHECK_IS_ON() due to runtime cost
// - thread_local TLS to simplify the implementation
// - Not on Android due to bot failures
#if DCHECK_IS_ON() && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
defined(PA_THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
#define PA_HAS_ALLOCATION_GUARD
#endif
#if defined(ARCH_CPU_ARM64) && defined(__clang__) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
static_assert(sizeof(void*) == 8);
#define PA_HAS_MEMORY_TAGGING
#endif
// Lazy commit should only be enabled on Windows, because commit charge is
// only meaningful and limited on Windows. It affects performance on other
// platforms and is simply not needed there, as the OS supports overcommit.
#if BUILDFLAG(IS_WIN)
constexpr bool kUseLazyCommit = true;
#else
constexpr bool kUseLazyCommit = false;
#endif
// On these platforms, lock all the partitions before fork(), and unlock after.
// This may be required on more platforms in the future.
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#define PA_HAS_ATFORK_HANDLER
#endif
// PartitionAlloc uses PartitionRootEnumerator to acquire all
// PartitionRoots at BeforeFork and to release at AfterFork.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_HAS_ATFORK_HANDLER)
#define PA_USE_PARTITION_ROOT_ENUMERATOR
#endif
// Due to potential conflict with the free list pointer in the "previous slot"
// mode in the smallest bucket, we can't check both the cookie and the dangling
// raw_ptr at the same time.
#if !(BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) && \
BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)) && \
(DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS))
#define PA_REF_COUNT_CHECK_COOKIE
#endif
// Prefer smaller slot spans.
//
// Smaller slot spans may reduce dirty memory fragmentation, but may also
// increase address space usage.
//
// This is intended to roll out more broadly, but only enabled on Linux for now
// to get performance bot and real-world data pre-A/B experiment.
#if BUILDFLAG(IS_LINUX)
#define PA_PREFER_SMALLER_SLOT_SPANS
#endif // BUILDFLAG(IS_LINUX)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_

View File

@ -0,0 +1,483 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include <algorithm>
#include <climits>
#include <cstddef>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
#endif
namespace partition_alloc {
// Bit flag constants used at `flag` argument of PartitionRoot::AllocWithFlags,
// AlignedAllocWithFlags, etc.
struct AllocFlags {
// In order to support bit operations like `flag_a | flag_b`, the old-
// fashioned enum (+ surrounding named struct) is used instead of enum class.
enum : int {
kReturnNull = 1 << 0,
kZeroFill = 1 << 1,
kNoHooks = 1 << 2, // Internal only.
// If the allocation requires a "slow path" (such as allocating/committing a
// new slot span), return nullptr instead. Note this makes all large
// allocations return nullptr, such as direct-mapped ones, and even for
// smaller ones, a nullptr value is common.
kFastPathOrReturnNull = 1 << 3, // Internal only.
kLastFlag = kFastPathOrReturnNull
};
};
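// Illustrative usage sketch added by the editor (not part of the original
// header), assuming an already-initialized base::ThreadSafePartitionRoot named
// `root` and an AllocWithFlags(flags, size, type_name) parameter order:
//
//   void* p = root->AllocWithFlags(
//       partition_alloc::AllocFlags::kReturnNull |
//           partition_alloc::AllocFlags::kZeroFill,
//       64, "ExampleType");
//   if (!p) {
//     // kReturnNull turned the out-of-memory crash into a nullptr return.
//   }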
namespace internal {
// Size of a cache line. Not all CPUs in the world have a 64 bytes cache line
// size, but as of 2021, most do. This is in particular the case for almost all
// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
// static alignment, we cannot query the CPU at runtime to determine the actual
// alignment, so use 64 bytes everywhere. Since this is only used to avoid false
// sharing, getting this wrong only results in lower performance, not incorrect
// code.
constexpr size_t kPartitionCachelineSize = 64;
// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
//
// *Super pages* are the underlying system allocations we make. Super pages
// contain multiple partition pages and include space for a small amount of
// metadata per partition page.
//
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
// of one or more `PartitionPage`s that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64 KiB and
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
return 16; // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
return 18; // 256 KiB
}
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
return vm_page_shift + 2;
}
#else
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
return 14; // 16 KiB
}
#endif
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageSize() {
return 1 << PartitionPageShift();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageOffsetMask() {
return PartitionPageSize() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageBaseMask() {
return ~PartitionPageOffsetMask();
}
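// Worked example (editor's illustration, assuming the default 14-bit shift
// above, i.e. none of the LOONGSON/PPC64/Apple-64-bit special cases):
//   PartitionPageSize()       == 1 << 14    == 16 KiB
//   PartitionPageOffsetMask() == 16 KiB - 1 == 0x3FFF
//   PartitionPageBaseMask()   == ~0x3FFF
// so `address & PartitionPageBaseMask()` yields the start of the partition
// page containing `address`, and `address & PartitionPageOffsetMask()` the
// offset within it.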
// Maximum number of partition pages per regular slot span. Above this limit,
// we call it a single-slot span, as the span literally hosts only one slot,
// and has a somewhat different implementation. At run-time, single-slot spans
// can be differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with page size larger than 4kB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size. What
// we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
// with freelist pointers right away. Writing freelist pointers will fault and
// dirty a private page, which is very wasteful if we never actually store
// objects there.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() {
return PartitionPageSize() >> SystemPageShift();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxSystemPagesPerRegularSlotSpan() {
return NumSystemPagesPerPartitionPage() *
kMaxPartitionPagesPerRegularSlotSpan;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxRegularSlotSpanSize() {
return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}
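// Worked example (editor's illustration, assuming 4 KiB system pages and the
// default 16 KiB partition pages):
//   NumSystemPagesPerPartitionPage()   == 16 KiB / 4 KiB == 4
//   MaxSystemPagesPerRegularSlotSpan() == 4 * 4          == 16
//   MaxRegularSlotSpanSize()           == 4 << 14        == 64 KiB
// Slot spans needing more than 64 KiB are the single-slot spans discussed
// above.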
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
// freeing memory very fast. 2 MiB size & alignment were chosen, because this
// virtual address block represents a full but single page table allocation on
// ARM, ia32 and x64, which may be slightly more performance- and memory-efficient.
// (Note, these super pages are backed by 4 KiB system pages and have nothing to
// do with OS concept of "huge pages"/"large pages", even though the size
// coincides.)
//
// The layout of the super page is as follows. The sizes below are the same for
// 32- and 64-bit platforms.
//
// +-----------------------+
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | *Scan State Bitmap |
// | Slot span |
// | Slot span |
// | ... |
// | Slot span |
// | Guard pages (16 KiB) |
// +-----------------------+
//
// State Bitmap is inserted for partitions that may have quarantine enabled.
//
// If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
// after the Metadata page for BackupRefPtr. The guard pages after the bitmap
// will be 4KiB.
//
//...
// | Metadata page (4 KiB) |
// | RefcountBitmap (4 KiB)|
// | Guard pages (4 KiB) |
//...
//
// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
// that slot spans of different sizes may co-exist within one super page. Even
// slot spans of the same size (in pages) may serve different slot sizes.
// However, all slots within a span have to be of the same size.
//
// The metadata page has the following format. Note that the `PartitionPage`
// that is not at the head of a slot span is "unused" (for the most part, it only
// stores the offset from the head page). In other words, the metadata for the
// slot span is stored only in the first `PartitionPage` of the slot span.
// Metadata accesses to other `PartitionPage`s are redirected to the first
// `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B) |
// | PartitionPage of slot span 1 (32 B, used) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 2 (32 B, used) |
// | PartitionPage of slot span 3 (32 B, used) |
// | ... |
// | PartitionPage of slot span N (32 B, used) |
// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
// A direct-mapped page has an identical layout at the beginning to fake it
// looking like a super page:
//
// +---------------------------------+
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | Direct mapped object |
// | Guard page (4 KiB, 32-bit only) |
// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout (on 64-bit
// architectures; on 32-bit ones the layout is identical, but some sizes are
// smaller due to smaller pointers):
//
// +----------------------------------+
// | SuperPageExtentEntry (32 B) |
// | PartitionPage (32 B) |
// | PartitionBucket (40 B) |
// | PartitionDirectMapExtent (32 B) |
// +----------------------------------+
//
// See |PartitionDirectMapMetadata| for details.
constexpr size_t kGiB = 1024 * 1024 * 1024ull;
constexpr size_t kSuperPageShift = 21; // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask & kMemTagUnmask;
// GigaCage is split into two pools, one which supports BackupRefPtr (BRP) and
// one that doesn't.
#if defined(PA_HAS_64_BITS_POINTERS)
// The Configurable Pool is only available in 64-bit mode
constexpr size_t kNumPools = 3;
// TODO(crbug.com/1250788): Remove the iOS special case, once larger address
// space can be used there. This limitation isn't meant for releasing, but is ok
// to keep for now only because nothing uses PartitionAlloc on iOS yet.
#if BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = kGiB / 4;
#elif BUILDFLAG(IS_MAC)
// Special-case macOS. Contrary to other platforms, there is no sandbox limit
// there, meaning that a single renderer could "happily" consume >8GiB. So the
// 8GiB pool size is a regression. Make the limit higher on this platform only
// to be consistent with previous behavior. See crbug.com/1232567 for details.
constexpr size_t kPoolMaxSize = 16 * kGiB;
#else
constexpr size_t kPoolMaxSize = 8 * kGiB;
#endif
#else // defined(PA_HAS_64_BITS_POINTERS)
constexpr size_t kNumPools = 2;
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;
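// Worked example (editor's illustration): with kSuperPageSize == 2 MiB and the
// default 64-bit kPoolMaxSize of 8 GiB, kMaxSuperPagesInPool == 8 GiB / 2 MiB
// == 4096 super pages per pool (8192 with the 16 GiB macOS limit above).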
// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE
// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
// of large areas which are less likely to benefit from MTE protection.
// TODO(Richard.Townsend@arm.com): adjust RecommitSystemPagesForData to skip
// PROT_MTE.
constexpr size_t kMaxMemoryTaggingSize = 1024;
#if defined(PA_HAS_MEMORY_TAGGING)
// Returns whether the tag of |object| overflowed and the containing slot needs
// to be moved to quarantine.
ALWAYS_INLINE bool HasOverflowTag(void* object) {
// The tag with which the slot is put to quarantine.
constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
static_assert((kOverflowTag & ~kMemTagUnmask) != 0,
"Overflow tag must be in tag bits");
return (reinterpret_cast<uintptr_t>(object) & ~kMemTagUnmask) == kOverflowTag;
}
#endif // defined(PA_HAS_MEMORY_TAGGING)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() {
return kSuperPageSize >> PartitionPageShift();
}
constexpr ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool;
}
#if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the GigaCage.
constexpr ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
return kSuperPageSize;
}
constexpr ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift;
}
#else // defined(PA_HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that GigaCage bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularity() {
return std::max(PageAllocationGranularity(), PartitionPageSize());
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() {
return DirectMapAllocationGranularity() - 1;
}
// The "order" of an allocation is closely related to the power-of-1 size of the
// allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbers starts at
// index 1 for the least-significant-bit.
//
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
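// Worked example (editor's illustration): for a 41-byte allocation
// (binary 101001), the most-significant bit is bit 6, so the order is 6;
// following the list above, order 6 covers sizes 32->63.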
// PartitionAlloc should return memory properly aligned for any type, to behave
// properly as a generic allocator. This is not strictly required as long as
// types are explicitly allocated with PartitionAlloc, but is necessary to use
// it as a malloc() implementation, and generally to match malloc()'s behavior.
//
// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
// bytes on 64 bit ones.
//
// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
constexpr size_t kMinBucketedOrder =
kAlignment == 16 ? 5 : 4; // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
constexpr size_t kMaxBucketedOrder = 20;
constexpr size_t kNumBucketedOrders =
(kMaxBucketedOrder - kMinBucketedOrder) + 1;
// 4 buckets per order (for the higher orders).
constexpr size_t kNumBucketsPerOrderBits = 2;
constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
constexpr size_t kMaxBucketSpacing =
1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
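// Worked example (editor's illustration, assuming kAlignment == 16, i.e. a
// typical 64-bit build):
//   kMinBucketedOrder  == 5            -> kSmallestBucket == 1 << 4 == 16
//   kNumBucketedOrders == 20 - 5 + 1   == 16
//   kNumBuckets        == 16 * 4       == 64
//   kMaxBucketSpacing  == 1 << (19 - 2) == 128 KiB
//   kMaxBucketed       == 512 KiB + 3 * 128 KiB == 896 KiB
// so requests above 896 KiB bypass the buckets and are direct-mapped.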
// Limit when downsizing a direct mapping using `realloc`:
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2GiB to make sure that a 2GiB allocation
// fails. This is a security choice in Chrome, to help make size_t vs. int bugs
// harder to exploit.
//
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxDirectMapped() {
// Subtract kSuperPageSize to accommodate for granularity inside
// PartitionRoot::GetDirectMapReservationSize.
return (1UL << 31) - kSuperPageSize;
}
// Max alignment supported by AlignedAllocWithFlags().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;
constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
// immediately, to help with fragmentation. At this point, it becomes dirty
// committed memory, which we want to minimize. This could be decommitted
// immediately, but that would imply doing a lot of system calls. In particular,
// for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
// system calls.
//
// As an intermediate step, empty SlotSpans are placed into a per-partition
// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
// before getting decommitted. A new entry (i.e. a newly empty SlotSpan) taking
// the place used by a previous one will lead the previous SlotSpan to be
// decommitted immediately, provided that it is still empty.
//
// Setting this value higher means giving more time for reuse to happen, at the
// cost of possibly increasing peak committed memory usage (and increasing the
// size of PartitionRoot a bit, since the ring buffer is there). Note that the
// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
// *not* removed from it when re-used. So the ring buffer really is a buffer of
// *possibly* empty SlotSpans.
//
// In all cases, PartitionRoot::PurgeMemory() with the
// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
// in the ring buffer, so with periodic purge enabled, this typically happens
// every few seconds.
constexpr size_t kEmptyCacheIndexBits = 7;
// kMaxFreeableSpans is the buffer size, but is never used as an index value,
// hence <= is appropriate.
constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
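// Editor's sketch of the ring-buffer behaviour described above (illustrative
// only; the real bookkeeping lives elsewhere, and `ring`, `cursor`,
// `RegisterEmpty` and `Decommit` are hypothetical names):
//
//   SlotSpanMetadata<ThreadSafe>* ring[kMaxFreeableSpans] = {};
//   size_t cursor = 0;
//
//   void RegisterEmpty(SlotSpanMetadata<ThreadSafe>* span) {
//     SlotSpanMetadata<ThreadSafe>* evicted = ring[cursor];
//     if (evicted && evicted->is_empty())
//       Decommit(evicted);  // The evicted entry loses its grace period.
//     ring[cursor] = span;  // Entries are not removed when re-used, so the
//                           // buffer holds *possibly* empty spans.
//     cursor = (cursor + 1) % kMaxFreeableSpans;
//   }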
// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably it is an "out of virtual address space" crash), a special
// crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1 GiB
// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;
constexpr unsigned char kQuarantinedByte = 0xEF;
// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;
} // namespace internal
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::AllocFlags;
using ::partition_alloc::internal::DirectMapAllocationGranularity;
using ::partition_alloc::internal::DirectMapAllocationGranularityOffsetMask;
using ::partition_alloc::internal::DirectMapAllocationGranularityShift;
#if defined(PA_HAS_MEMORY_TAGGING)
using ::partition_alloc::internal::HasOverflowTag;
#endif // defined(PA_HAS_MEMORY_TAGGING)
using ::partition_alloc::internal::kBitsPerSizeT;
using ::partition_alloc::internal::kBRPPoolHandle;
using ::partition_alloc::internal::kConfigurablePoolHandle;
using ::partition_alloc::internal::kDefaultEmptySlotSpanRingSize;
using ::partition_alloc::internal::kEmptyCacheIndexBits;
using ::partition_alloc::internal::kFreedByte;
using ::partition_alloc::internal::kGiB;
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxBucketed;
using ::partition_alloc::internal::kMaxBucketedOrder;
using ::partition_alloc::internal::kMaxBucketSpacing;
using ::partition_alloc::internal::kMaxFreeableSpans;
using ::partition_alloc::internal::kMaxMemoryTaggingSize;
using ::partition_alloc::internal::kMaxPartitionPagesPerRegularSlotSpan;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kMinBucketedOrder;
using ::partition_alloc::internal::kMinDirectMappedDownsize;
using ::partition_alloc::internal::kNumBucketedOrders;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kNumBucketsPerOrder;
using ::partition_alloc::internal::kNumBucketsPerOrderBits;
using ::partition_alloc::internal::kNumPools;
using ::partition_alloc::internal::kPartitionCachelineSize;
using ::partition_alloc::internal::kPoolMaxSize;
using ::partition_alloc::internal::kQuarantinedByte;
using ::partition_alloc::internal::kReasonableSizeOfUnusedPages;
using ::partition_alloc::internal::kRegularPoolHandle;
using ::partition_alloc::internal::kSmallestBucket;
using ::partition_alloc::internal::kSuperPageAlignment;
using ::partition_alloc::internal::kSuperPageBaseMask;
using ::partition_alloc::internal::kSuperPageOffsetMask;
using ::partition_alloc::internal::kSuperPageShift;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::kUninitializedByte;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::MaxRegularSlotSpanSize;
using ::partition_alloc::internal::MaxSuperPagesInPool;
using ::partition_alloc::internal::MaxSystemPagesPerRegularSlotSpan;
using ::partition_alloc::internal::NumPartitionPagesPerSuperPage;
using ::partition_alloc::internal::NumSystemPagesPerPartitionPage;
using ::partition_alloc::internal::PartitionPageBaseMask;
using ::partition_alloc::internal::PartitionPageOffsetMask;
using ::partition_alloc::internal::PartitionPageShift;
using ::partition_alloc::internal::PartitionPageSize;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

View File

@ -0,0 +1,129 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
#include <algorithm>
#include <cstddef>
#include "base/allocator/buildflags.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
namespace partition_alloc {
namespace internal {
// Alignment has two constraints:
// - Alignment requirement for scalar types: alignof(std::max_align_t)
// - Alignment requirement for operator new().
//
// The two are separate on 64-bit Windows, where the first one is 8 bytes, and
// the second one 16. We could technically return something different for
// malloc() and operator new(), but this would complicate things, and most of
// our allocations are presumably coming from operator new() anyway.
//
// __STDCPP_DEFAULT_NEW_ALIGNMENT__ is C++17. As such, it is not defined on all
// platforms, as Chrome's requirement is C++14 as of 2020.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
constexpr size_t kAlignment =
std::max(alignof(max_align_t),
static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
#else
constexpr size_t kAlignment = alignof(max_align_t);
#endif
static_assert(kAlignment <= 16,
"PartitionAlloc doesn't support a fundamental alignment larger "
"than 16 bytes.");
constexpr bool ThreadSafe = true;
template <bool thread_safe>
struct SlotSpanMetadata;
} // namespace internal
class PartitionStatsDumper;
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionStatsDumper;
using ::partition_alloc::internal::kAlignment;
namespace internal {
using ::partition_alloc::internal::SlotSpanMetadata;
using ::partition_alloc::internal::ThreadSafe;
#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(USE_BACKUP_REF_PTR)
BASE_EXPORT void CheckThatSlotOffsetIsZero(uintptr_t address);
#endif
} // namespace internal
template <bool thread_safe = true>
struct PartitionRoot;
using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
} // namespace base
namespace partition_alloc {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::PartitionRoot;
namespace internal {
#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(USE_BACKUP_REF_PTR)
using ::base::internal::CheckThatSlotOffsetIsZero;
#endif
} // namespace internal
} // namespace partition_alloc
// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
//
// The malloc attribute indicates that the function acts like a system memory
// allocation function, returning a pointer to allocated storage disjoint from
// the storage for any other object accessible to the caller.
//
// Note that it doesn't apply to realloc()-type functions, as they can return
// the same pointer as the one passed as a parameter, as noted in e.g. stdlib.h
// on Linux systems.
#if defined(__has_attribute)
#if __has_attribute(malloc)
#define PA_MALLOC_FN __attribute__((malloc))
#endif
// Allows the compiler to assume that the return value is aligned on a
// kAlignment boundary. This is useful for e.g. using aligned vector
// instructions in the constructor for zeroing.
#if __has_attribute(assume_aligned)
#define PA_MALLOC_ALIGNED \
__attribute__((assume_aligned(::partition_alloc::internal::kAlignment)))
#endif
#endif // defined(__has_attribute)
#if !defined(PA_MALLOC_FN)
#define PA_MALLOC_FN
#endif
#if !defined(PA_MALLOC_ALIGNED)
#define PA_MALLOC_ALIGNED
#endif
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_

View File

@ -0,0 +1,121 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include <ostream>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h"
namespace partition_alloc {
namespace {
internal::Lock g_hook_lock;
internal::Lock& GetHooksLock() {
return g_hook_lock;
}
} // namespace
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
PartitionAllocHooks::realloc_override_hook_(nullptr);
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook) {
internal::ScopedGuard guard(GetHooksLock());
// Chained hooks are not supported. Registering a non-null hook when a
// non-null hook is already registered indicates somebody is trying to
// overwrite a hook.
PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
(!alloc_hook && !free_hook))
<< "Overwriting already set observer hooks";
allocation_observer_hook_ = alloc_hook;
free_observer_hook_ = free_hook;
hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}
void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
FreeOverrideHook* free_hook,
ReallocOverrideHook realloc_hook) {
internal::ScopedGuard guard(GetHooksLock());
PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
!realloc_override_hook_) ||
(!alloc_hook && !free_hook && !realloc_hook))
<< "Overwriting already set override hooks";
allocation_override_hook_ = alloc_hook;
free_override_hook_ = free_hook;
realloc_override_hook_ = realloc_hook;
hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}
void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address,
size_t size,
const char* type_name) {
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name);
}
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
void** out,
int flags,
size_t size,
const char* type_name) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name);
return false;
}
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address);
}
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address);
return false;
}
void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
void* new_address,
size_t size,
const char* type_name) {
// Report a reallocation as a free followed by an allocation.
AllocationObserverHook* allocation_hook =
allocation_observer_hook_.load(std::memory_order_relaxed);
FreeObserverHook* free_hook =
free_observer_hook_.load(std::memory_order_relaxed);
if (allocation_hook && free_hook) {
free_hook(old_address);
allocation_hook(new_address, size, type_name);
}
}
bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
void* address) {
if (ReallocOverrideHook* hook =
realloc_override_hook_.load(std::memory_order_relaxed)) {
return hook(out, address);
}
return false;
}
} // namespace partition_alloc

View File

@ -0,0 +1,93 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
#include <atomic>
#include <cstddef>
#include "base/base_export.h"
namespace partition_alloc {
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
class BASE_EXPORT PartitionAllocHooks {
public:
// Log allocation and free events.
typedef void AllocationObserverHook(void* address,
size_t size,
const char* type_name);
typedef void FreeObserverHook(void* address);
// If it returns true, the allocation has been overridden with the pointer in
// *out.
typedef bool AllocationOverrideHook(void** out,
int flags,
size_t size,
const char* type_name);
// If it returns true, then the allocation was overridden and has been freed.
typedef bool FreeOverrideHook(void* address);
// If it returns true, the underlying allocation is overridden and *out holds
// the size of the underlying allocation.
typedef bool ReallocOverrideHook(size_t* out, void* address);
// To unhook, call Set*Hooks with nullptrs.
static void SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook);
static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
FreeOverrideHook* free_hook,
ReallocOverrideHook realloc_hook);
// Helper method to check whether hooks are enabled. This is an optimization
// so that if a function needs to call observer and override hooks in two
// different places this value can be cached and only loaded once.
static bool AreHooksEnabled() {
return hooks_enabled_.load(std::memory_order_relaxed);
}
static void AllocationObserverHookIfEnabled(void* address,
size_t size,
const char* type_name);
static bool AllocationOverrideHookIfEnabled(void** out,
int flags,
size_t size,
const char* type_name);
static void FreeObserverHookIfEnabled(void* address);
static bool FreeOverrideHookIfEnabled(void* address);
static void ReallocObserverHookIfEnabled(void* old_address,
void* new_address,
size_t size,
const char* type_name);
static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
private:
// Single bool that is used to indicate whether observer or allocation hooks
// are set, to reduce the number of loads required to check whether hooking is
// enabled.
static std::atomic<bool> hooks_enabled_;
// Lock used to synchronize Set*Hooks calls.
static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
static std::atomic<FreeObserverHook*> free_observer_hook_;
static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
static std::atomic<FreeOverrideHook*> free_override_hook_;
static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
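// Editor's illustrative sketch (not part of the original header): registering
// observer hooks that count allocations and frees. `g_allocs`, `g_frees`,
// `OnAlloc` and `OnFree` are hypothetical names.
//
//   std::atomic<size_t> g_allocs{0};
//   std::atomic<size_t> g_frees{0};
//
//   void OnAlloc(void* address, size_t size, const char* type_name) {
//     g_allocs.fetch_add(1, std::memory_order_relaxed);
//   }
//   void OnFree(void* address) {
//     g_frees.fetch_add(1, std::memory_order_relaxed);
//   }
//
//   // Register the hooks; pass nullptrs later to unhook.
//   partition_alloc::PartitionAllocHooks::SetObserverHooks(&OnAlloc, &OnFree);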
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionAllocHooks;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_

View File

@ -0,0 +1,54 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/logging_buildflags.h"
// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. (c.f. //
// base/allocator/partition_allocator/partition_alloc_check.h)
// So PA_NOTREACHED() uses PA_DCHECK() instead of DCHECK().
#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
#define PA_NOTREACHED() \
true ? logging::RawError(__FILE__ \
"(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
: EAT_CHECK_STREAM_PARAMS()
#elif BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OFFICIAL_BUILD) && \
defined(NDEBUG) && DCHECK_IS_ON()
// PA_DCHECK(condition) is PA_CHECK(condition) if DCHECK_IS_ON().
// When BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC), OFFICIAL_BUILD, NDEBUG are
// defined, PA_CHECK(false) is IMMEDIATE_CRASH(). Since IMMEDIATE_CRASH()
// hints __builtin_unreachable() to the compiler, the following code causes
// compile failure:
// switch(...) {
// ...
// case X:
// PA_DCHECK(false);
// [[fallthrough]]; // The compiler knows "not reached".
// case Y:
// ...
// So define PA_NOTREACHED() by using async-signal-safe RawCheck().
#define PA_NOTREACHED() \
UNLIKELY(true) \
? logging::RawCheck(__FILE__ \
"(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
: EAT_CHECK_STREAM_PARAMS()
#else
// PA_CHECK() uses RawCheck() for error reporting. So "PA_DCHECK(false);
// [[fallthrough]];" doesn't cause compile failure.
#define PA_NOTREACHED() PA_DCHECK(false)
#endif
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_

File diff suppressed because it is too large

View File

@ -0,0 +1,202 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/thread_annotations.h"
namespace partition_alloc::internal {
constexpr inline int kPartitionNumSystemPagesPerSlotSpanBits = 8;
// Visible for testing.
BASE_EXPORT uint8_t
ComputeSystemPagesPerSlotSpan(size_t slot_size, bool prefer_smaller_slot_spans);
template <bool thread_safe>
struct PartitionBucket {
// Accessed most in hot path => goes first. Only nullptr for invalid buckets,
// may be pointing to the sentinel.
SlotSpanMetadata<thread_safe>* active_slot_spans_head;
SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
uint32_t slot_size;
uint32_t num_system_pages_per_slot_span
: kPartitionNumSystemPagesPerSlotSpanBits;
uint32_t num_full_slot_spans : 24;
// `slot_size_reciprocal` is used to improve the performance of
// `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
// chosen to provide the desired accuracy. As a result, we can replace a slow
// integer division (or modulo) operation with a pair of multiplication and a
// bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
uint64_t slot_size_reciprocal;
// This is `M` from the formula above. For accurate results, both `value` and
// `size`, which are bound by `kMaxBucketed` for our purposes, must be less
// than `2 ** (M / 2)`. On the other hand, the result of the expression
// `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
static constexpr uint64_t kReciprocalShift = 42;
static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
static_assert(
kMaxBucketed < (1 << (kReciprocalShift / 2)),
"GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
"large.");
// Public API.
void Init(uint32_t new_slot_size);
// Sets |is_already_zeroed| to true if the allocation was satisfied by
// requesting (a) new page(s) from the operating system, or false otherwise.
// This enables an optimization for when callers use
// |AllocFlags::kZeroFill|: there is no need to call memset on fresh
// pages; the OS has already zeroed them. (See
// |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in SlotSpanMetadata.
BASE_EXPORT NOINLINE uintptr_t SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t raw_size,
size_t slot_span_alignment,
bool* is_already_zeroed)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
ALWAYS_INLINE bool CanStoreRawSize() const {
// For direct-map as well as single-slot slot spans (recognized by checking
// against |MaxRegularSlotSpanSize()|), we have some spare metadata space in
// subsequent PartitionPage to store the raw size. It isn't only about metadata
// space, though: slot spans that have more than one slot can't have a raw size
// stored, because we wouldn't know which slot it applies to.
if (LIKELY(slot_size <= MaxRegularSlotSpanSize()))
return false;
PA_DCHECK((slot_size % SystemPageSize()) == 0);
PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
return true;
}
// Some buckets are pseudo-buckets, which are disabled because they would
// otherwise not fulfill alignment constraints.
ALWAYS_INLINE bool is_valid() const {
return active_slot_spans_head != nullptr;
}
ALWAYS_INLINE bool is_direct_mapped() const {
return !num_system_pages_per_slot_span;
}
ALWAYS_INLINE size_t get_bytes_per_span() const {
// Cannot overflow, num_system_pages_per_slot_span is a bitfield, and 255
// pages fit in a size_t.
static_assert(kPartitionNumSystemPagesPerSlotSpanBits <= 8, "");
return num_system_pages_per_slot_span << SystemPageShift();
}
ALWAYS_INLINE size_t get_slots_per_span() const {
size_t ret = GetSlotNumber(get_bytes_per_span());
PA_DCHECK(ret <= SlotSpanMetadata<thread_safe>::kMaxSlotsPerSlotSpan);
return ret;
}
// Returns a natural number of partition pages (calculated by
// ComputeSystemPagesPerSlotSpan()) to allocate from the current super page
// when the bucket runs out of slots.
ALWAYS_INLINE size_t get_pages_per_slot_span() const {
// Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
return (num_system_pages_per_slot_span +
(NumSystemPagesPerPartitionPage() - 1)) /
NumSystemPagesPerPartitionPage();
}
// This helper function scans a bucket's active slot span list for a suitable
// new active slot span. When it finds a suitable new active slot span (one
// that has free slots and is not empty), it is set as the new active slot
// span. If there is no suitable new active slot span, the current active slot
// span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
// slot spans are scanned, they are tidied up according to their state. Empty
// slot spans are swept on to the empty list, decommitted slot spans on to the
// decommitted list and full slot spans are unlinked from any list.
//
// This is where the guts of the bucket maintenance is done!
bool SetNewActiveSlotSpan();
// Walks the entire active slot span list, and performs regular maintenance,
// where empty, decommitted and full slot spans are moved to their
// steady-state place.
BASE_EXPORT void MaintainActiveList();
// Returns a slot number starting from the beginning of the slot span.
ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) const {
// See the static assertion for `kReciprocalShift` above.
PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
PA_DCHECK(slot_size <= kMaxBucketed);
const size_t offset_in_slot =
((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);
return offset_in_slot;
}
// Sort the freelists of all slot spans.
void SortSlotSpanFreelists();
private:
// Allocates a new slot span with size |num_partition_pages| from the
// current extent. Metadata within this slot span will be initialized.
// Returns nullptr on error.
ALWAYS_INLINE SlotSpanMetadata<thread_safe>* AllocNewSlotSpan(
PartitionRoot<thread_safe>* root,
int flags,
size_t slot_span_alignment) EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
// Allocates a new super page from the current extent, if possible. All
// slot-spans will be in the decommitted state. Returns the address of the
// super page's payload, or 0 on error.
ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot<thread_safe>* root,
int flags)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
// Each bucket allocates a slot span when it runs out of slots.
// A slot span's size is equal to get_pages_per_slot_span() number of
// partition pages. This function initializes all PartitionPage within the
// span to point to the first PartitionPage which holds all the metadata
// for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
// as the owner of the span. It does NOT put the slots into the bucket's
// freelist.
ALWAYS_INLINE void InitializeSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span);
// Commit 1 or more pages in |slot_span|, enough to get the next slot, which
// is returned by this function. If more slots fit into the committed pages,
// they'll be added to the free list of the slot span (note that next pointers
// are stored inside the slots).
// The free list must be empty when calling this function.
//
// If |slot_span| was freshly allocated, it must have been passed through
// InitializeSlotSpan() first.
ALWAYS_INLINE uintptr_t
ProvisionMoreSlotsAndAllocOne(PartitionRoot<thread_safe>* root,
SlotSpanMetadata<thread_safe>* slot_span)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
};
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionBucket;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

View File

@ -0,0 +1,280 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
namespace partition_alloc::internal {
// Don't use an anonymous namespace for the constants because it can inhibit
// collapsing them together, even when they are tagged as inline.
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
// Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
// With kNumBucketsPerOrderBits == 2, order_index is the next two MSBs == 01
// == 1.
// sub_order_index_mask is a mask for the remaining bits == 111 (masking 41
// down to 001 for the sub_order_index).
constexpr uint8_t OrderIndexShift(uint8_t order) {
if (order < kNumBucketsPerOrderBits + 1)
return 0;
return order - (kNumBucketsPerOrderBits + 1);
}
constexpr size_t OrderSubIndexMask(uint8_t order) {
if (order == kBitsPerSizeT)
return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
return ((static_cast<size_t>(1) << order) - 1) >>
(kNumBucketsPerOrderBits + 1);
}
#if defined(PA_HAS_64_BITS_POINTERS)
#define PA_BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, "");
#else
#define PA_BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, "");
#endif // defined(PA_HAS_64_BITS_POINTERS)
inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2),
OrderIndexShift(3), OrderIndexShift(4), OrderIndexShift(5),
OrderIndexShift(6), OrderIndexShift(7), OrderIndexShift(8),
OrderIndexShift(9), OrderIndexShift(10), OrderIndexShift(11),
OrderIndexShift(12), OrderIndexShift(13), OrderIndexShift(14),
OrderIndexShift(15), OrderIndexShift(16), OrderIndexShift(17),
OrderIndexShift(18), OrderIndexShift(19), OrderIndexShift(20),
OrderIndexShift(21), OrderIndexShift(22), OrderIndexShift(23),
OrderIndexShift(24), OrderIndexShift(25), OrderIndexShift(26),
OrderIndexShift(27), OrderIndexShift(28), OrderIndexShift(29),
OrderIndexShift(30), OrderIndexShift(31), OrderIndexShift(32),
#if PA_BITS_PER_SIZE_T == 64
OrderIndexShift(33), OrderIndexShift(34), OrderIndexShift(35),
OrderIndexShift(36), OrderIndexShift(37), OrderIndexShift(38),
OrderIndexShift(39), OrderIndexShift(40), OrderIndexShift(41),
OrderIndexShift(42), OrderIndexShift(43), OrderIndexShift(44),
OrderIndexShift(45), OrderIndexShift(46), OrderIndexShift(47),
OrderIndexShift(48), OrderIndexShift(49), OrderIndexShift(50),
OrderIndexShift(51), OrderIndexShift(52), OrderIndexShift(53),
OrderIndexShift(54), OrderIndexShift(55), OrderIndexShift(56),
OrderIndexShift(57), OrderIndexShift(58), OrderIndexShift(59),
OrderIndexShift(60), OrderIndexShift(61), OrderIndexShift(62),
OrderIndexShift(63), OrderIndexShift(64)
#endif
};
inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
OrderSubIndexMask(0), OrderSubIndexMask(1), OrderSubIndexMask(2),
OrderSubIndexMask(3), OrderSubIndexMask(4), OrderSubIndexMask(5),
OrderSubIndexMask(6), OrderSubIndexMask(7), OrderSubIndexMask(8),
OrderSubIndexMask(9), OrderSubIndexMask(10), OrderSubIndexMask(11),
OrderSubIndexMask(12), OrderSubIndexMask(13), OrderSubIndexMask(14),
OrderSubIndexMask(15), OrderSubIndexMask(16), OrderSubIndexMask(17),
OrderSubIndexMask(18), OrderSubIndexMask(19), OrderSubIndexMask(20),
OrderSubIndexMask(21), OrderSubIndexMask(22), OrderSubIndexMask(23),
OrderSubIndexMask(24), OrderSubIndexMask(25), OrderSubIndexMask(26),
OrderSubIndexMask(27), OrderSubIndexMask(28), OrderSubIndexMask(29),
OrderSubIndexMask(30), OrderSubIndexMask(31), OrderSubIndexMask(32),
#if PA_BITS_PER_SIZE_T == 64
OrderSubIndexMask(33), OrderSubIndexMask(34), OrderSubIndexMask(35),
OrderSubIndexMask(36), OrderSubIndexMask(37), OrderSubIndexMask(38),
OrderSubIndexMask(39), OrderSubIndexMask(40), OrderSubIndexMask(41),
OrderSubIndexMask(42), OrderSubIndexMask(43), OrderSubIndexMask(44),
OrderSubIndexMask(45), OrderSubIndexMask(46), OrderSubIndexMask(47),
OrderSubIndexMask(48), OrderSubIndexMask(49), OrderSubIndexMask(50),
OrderSubIndexMask(51), OrderSubIndexMask(52), OrderSubIndexMask(53),
OrderSubIndexMask(54), OrderSubIndexMask(55), OrderSubIndexMask(56),
OrderSubIndexMask(57), OrderSubIndexMask(58), OrderSubIndexMask(59),
OrderSubIndexMask(60), OrderSubIndexMask(61), OrderSubIndexMask(62),
OrderSubIndexMask(63), OrderSubIndexMask(64)
#endif
};
// The class used to generate the bucket lookup table at compile-time.
class BucketIndexLookup final {
public:
ALWAYS_INLINE constexpr static size_t GetIndexForDenserBuckets(size_t size);
ALWAYS_INLINE constexpr static size_t GetIndex(size_t size);
constexpr BucketIndexLookup() {
constexpr uint16_t sentinel_bucket_index = kNumBuckets;
InitBucketSizes();
uint16_t* bucket_index_ptr = &bucket_index_lookup_[0];
uint16_t bucket_index = 0;
// Very small allocations, smaller than the first bucketed order ->
// everything goes to the first bucket.
for (uint8_t order = 0; order < kMinBucketedOrder; ++order) {
for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
*bucket_index_ptr++ = 0;
}
}
// Normal buckets.
for (uint8_t order = kMinBucketedOrder; order <= kMaxBucketedOrder;
++order) {
size_t size = static_cast<size_t>(1) << (order - 1);
size_t current_increment = size >> kNumBucketsPerOrderBits;
for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
*bucket_index_ptr++ = bucket_index;
// For small sizes, buckets are close together (current_increment is
// small). For instance, for:
// - kAlignment == 16 (which is the case on most 64 bit systems)
// - kNumBucketsPerOrder == 4
//
// The 3 next buckets after 16 are {20, 24, 28}. None of these are a
// multiple of kAlignment, so they use the next bucket, that is 32 here.
if (size % kAlignment != 0) {
PA_DCHECK(bucket_sizes_[bucket_index] > size);
// Do not increment bucket_index, since in the example above
// current_size may be 20, and bucket_sizes_[bucket_index] == 32.
} else {
PA_DCHECK(bucket_sizes_[bucket_index] == size);
bucket_index++;
}
size += current_increment;
}
}
// Direct-mapped, and overflow.
for (uint8_t order = kMaxBucketedOrder + 1; order <= kBitsPerSizeT;
++order) {
for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
*bucket_index_ptr++ = sentinel_bucket_index;
}
}
// Smaller because some buckets are not valid due to alignment constraints.
PA_DCHECK(bucket_index < kNumBuckets);
PA_DCHECK(bucket_index_ptr == bucket_index_lookup_ + ((kBitsPerSizeT + 1) *
kNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucket_index_ptr = sentinel_bucket_index;
}
constexpr const size_t* bucket_sizes() const { return &bucket_sizes_[0]; }
private:
constexpr void InitBucketSizes() {
size_t current_size = kSmallestBucket;
size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
size_t* bucket_size = &bucket_sizes_[0];
for (size_t i = 0; i < kNumBucketedOrders; ++i) {
for (size_t j = 0; j < kNumBucketsPerOrder; ++j) {
// All bucket sizes have to be multiples of kAlignment, skip otherwise.
if (current_size % kAlignment == 0) {
*bucket_size = current_size;
++bucket_size;
}
current_size += current_increment;
}
current_increment <<= 1;
}
// The remaining buckets are invalid.
while (bucket_size < bucket_sizes_ + kNumBuckets) {
*(bucket_size++) = kInvalidBucketSize;
}
}
size_t bucket_sizes_[kNumBuckets]{};
// The bucket lookup table lets us map a size_t to a bucket quickly.
// The trailing +1 caters for the overflow case for very large allocation
// sizes. It is one flat array instead of a 2D array because in the 2D
// world, we'd need to index array[blah][max+1] which risks undefined
// behavior.
uint16_t
bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
};
ALWAYS_INLINE constexpr size_t RoundUpToPowerOfTwo(size_t size) {
const size_t n = 1 << base::bits::Log2Ceiling(static_cast<uint32_t>(size));
PA_DCHECK(size <= n);
return n;
}
ALWAYS_INLINE constexpr size_t RoundUpSize(size_t size) {
const size_t next_power = RoundUpToPowerOfTwo(size);
const size_t prev_power = next_power >> 1;
PA_DCHECK(size <= next_power);
PA_DCHECK(prev_power < size);
if (size <= prev_power * 5 / 4) {
return prev_power * 5 / 4;
} else {
return next_power;
}
}
// static
ALWAYS_INLINE constexpr size_t BucketIndexLookup::GetIndex(size_t size) {
// For any order 2^N, under the denser bucket distribution ("Distribution A"),
// we have 4 evenly distributed buckets: 2^N, 1.25*2^N, 1.5*2^N, and 1.75*2^N.
// These numbers represent the maximum size of an allocation that can go into
// a given bucket.
//
// Under the less dense bucket distribution ("Distribution B"), we only have
// 2 buckets for the same order 2^N: 2^N and 1.25*2^N.
//
// Everything that would be mapped to the last two buckets of an order under
// Distribution A is instead mapped to the first bucket of the next order
// under Distribution B. The following diagram shows roughly what this looks
// like for the order starting from 2^10, as an example.
//
// A: ... | 2^10 | 1.25*2^10 | 1.5*2^10 | 1.75*2^10 | 2^11 | ...
// B: ... | 2^10 | 1.25*2^10 | -------- | --------- | 2^11 | ...
//
// So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
// Distribution A, but to the 2^11 bucket under Distribution B.
if (1 << 8 < size && size < 1 << 19)
return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
else
return BucketIndexLookup::GetIndexForDenserBuckets(size);
}
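// Worked example (editor's illustration): for size == 1400 (~1.37 * 2^10),
// which falls in the (2^8, 2^19) range above, RoundUpSize(1400) computes
// next_power == 2048 and prev_power == 1024; since 1400 > 1024 * 5 / 4 == 1280
// it returns 2048, so the allocation is served from the 2^11 bucket
// (Distribution B). Without the rounding, GetIndexForDenserBuckets(1400) would
// have picked the 1.5 * 2^10 == 1536 bucket (Distribution A).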
// static
ALWAYS_INLINE constexpr size_t BucketIndexLookup::GetIndexForDenserBuckets(
size_t size) {
// This forces the bucket table to be constant-initialized and immediately
// materialized in the binary.
constexpr BucketIndexLookup lookup{};
const uint8_t order =
kBitsPerSizeT - base::bits::CountLeadingZeroBitsSizeT(size);
// The order index is simply the next few bits after the most significant
// bit.
const size_t order_index =
(size >> kOrderIndexShift[order]) & (kNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
const size_t sub_order_index = size & kOrderSubIndexMask[order];
const uint16_t index =
lookup.bucket_index_lookup_[(order << kNumBucketsPerOrderBits) +
order_index + !!sub_order_index];
PA_DCHECK(index <= kNumBuckets); // Last one is the sentinel bucket.
return index;
}
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::BucketIndexLookup;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_

Some files were not shown because too many files have changed in this diff