mirror of https://github.com/klzgrad/naiveproxy.git
synced 2024-11-22 13:26:12 +03:00

commit 0ab18a06d5: Import chromium-124.0.6367.54
src/.clang-format (new file, 41 lines)
@@ -0,0 +1,41 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium

# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11

# TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
# the Chromium style (is implied by BasedOnStyle: Chromium).
InsertBraces: true
InsertNewlineAtEOF: true

# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
#   IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"
src/.gitattributes (new file, vendored, 58 lines)
@@ -0,0 +1,58 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf

# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf

# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.rs text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.toml text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ATL_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf

# Skip Tricium by default on files in third_party.
third_party/** -tricium
src/.gn (new file, 170 lines)
@@ -0,0 +1,170 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.

import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")

# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"

# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"

# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
  # TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
  # a warning that "Build argument has no effect". When adding a v8 variable, it
  # also needs to be defined in src/ios/BUILD.gn (and removed from both
  # locations when it is removed).

  v8_enable_gdbjit = false
  v8_imminent_deprecation_warnings = false

  # Don't include webrtc's builtin task queue implementation.
  rtc_link_task_queue_impl = false

  # When building with Chromium, `webrtc::Location` is replaced by
  # `base::Location`. Since WebRTC doesn't use `public_deps` (webrtc:8603), it
  # would fail to propagate the dependency internally. Instead, WebRTC lets its
  # embedders define it globally for all of its targets.
  rtc_common_public_deps = [ "//base" ]

  # Don't include the iLBC audio codec.
  # TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
  # deps on codecs, we can remove this.
  rtc_include_ilbc = false

  # Changes some setup for the Crashpad build so that it builds against
  # Chromium's zlib, base, etc.
  crashpad_dependencies = "chromium"

  # Override ANGLE's Vulkan dependencies.
  angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
  angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
  angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
  angle_vulkan_validation_layers_dir =
      "//third_party/vulkan-deps/vulkan-validation-layers/src"

  # Overwrite default args declared in the Fuchsia sdk
  fuchsia_sdk_readelf_exec =
      "//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"

  # Overwrite default args declared in the pdfium library
  pdf_partition_alloc_dir = "//base/allocator/partition_allocator"

  devtools_visibility = [ "*" ]
}

# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
  # //v8, https://crbug.com/v8/7330
  "//v8/src/inspector:inspector",  # 20 errors
  "//v8/test/cctest:cctest_sources",  # 15 errors
  "//v8/test/unittests:inspector_unittests_sources",  # 2 errors
  "//v8:cppgc_base",  # 1 error
  "//v8:v8_internal_headers",  # 11 errors
  "//v8:v8_libplatform",  # 2 errors
]

# This is the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
#   enable a different mode. Instead, use GN build args to enable or disable
#   functionality and set options. An example is checking for a file in the
#   src-internal repo to see if the corresponding src-internal feature should
#   be enabled. There are several things that can go wrong with this:
#
#    - It's mysterious what causes some things to happen. Although in many cases
#      such behavior can be conveniently automatic, GN optimizes for explicit
#      and obvious behavior so people can more easily diagnose problems.
#
#    - The user can't enable a mode for one build and not another. With GN build
#      args, the user can choose the exact configuration of multiple builds
#      using one checkout. But by implicitly basing flags on the state of the
#      checkout, this functionality is broken.
#
#    - It's easy to get stale files. If for example the user edits the gclient
#      to stop checking out src-internal (or any other optional thing), it's
#      easy to end up with stale files still mysteriously triggering build
#      conditions that are no longer appropriate (yes, this happens in real
#      life).
#
# - Do not use a script to iterate files in a directory (glob):
#
#    - This has the same "stale file" problem as the above discussion. Various
#      operations can leave untracked files in the source tree which can cause
#      surprising effects.
#
#    - It becomes impossible to use "git grep" to find where a certain file is
#      referenced. This operation is very common and people really do get
#      confused when things aren't listed.
#
#    - It's easy to screw up. One common case is a build-time script that packs
#      up a directory. The author notices that the script isn't re-run when the
#      directory is updated, so adds a glob so all the files are listed as
#      inputs. This seems to work great... until a file is deleted. When a
#      file is deleted, all the inputs the glob lists will still be up to date
#      and no command-lines will have been changed. The action will not be
#      re-run and the build will be broken. It is possible to get this correct
#      using glob, and it's possible to mess it up without glob, but globs make
#      this situation much easier to create. If the build always lists the
#      files and passes them to a script, it will always be correct.

exec_script_whitelist =
    build_dotfile_settings.exec_script_whitelist +
    angle_dotfile_settings.exec_script_whitelist +
    [
      # Whitelist entries for //build should go into
      # //build/dotfile_settings.gni instead, so that they can be shared
      # with other repos. The entries in this list should be only for files
      # in the Chromium repo outside of //build.
      "//build_overrides/build.gni",

      "//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
      "//chrome/version.gni",

      # TODO(dgn): Layer violation but breaks the build otherwise, see
      # https://crbug.com/474506.
      "//clank/java/BUILD.gn",
      "//clank/native/BUILD.gn",

      "//google_apis/BUILD.gn",
      "//printing/BUILD.gn",

      "//remoting/host/installer/linux/BUILD.gn",
      "//remoting/remoting_version.gni",
      "//remoting/host/installer/win/generate_clsids.gni",

      "//tools/grit/grit_rule.gni",
      "//tools/gritsettings/BUILD.gn",
    ]
src/AUTHORS (new file, 1663 lines)
File diff suppressed because it is too large.
src/BUILD.gn (new file, 1763 lines)
File diff suppressed because it is too large.
src/LICENSE (new file, 27 lines)
@@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
src/base/BUILD.gn (new file, 5318 lines)
File diff suppressed because it is too large.
src/base/DEPS (new file, 49 lines)
@@ -0,0 +1,49 @@
include_rules = [
  # `#include "partition_alloc/..."` is preferred to
  # `#include "base/allocator/partition_allocator/src/partition_alloc/..."`.
  "+partition_alloc",
  "-base/allocator/partition_allocator",

  "+third_party/ashmem",
  "+third_party/apple_apsl",
  "+third_party/boringssl/src/include",
  "+third_party/ced",
  # We are moving the old jni_generator to jni_zero; some references will remain
  # in //base.
  "+third_party/jni_zero",
  "+third_party/libevent",
  "+third_party/libunwindstack/src/libunwindstack/include",
  "+third_party/lss",
  "+third_party/modp_b64",
  "+third_party/perfetto/include",
  "+third_party/perfetto/protos/perfetto",
  # Conversions between base and Rust types (e.g. base::span <-> rust::Slice)
  # require the cxx.h header from cxx. This is only used if Rust is enabled
  # in the gn build; see //base/BUILD.gn's conditional dependency on
  # //build/rust:cxx_cppdeps.
  "+third_party/rust/cxx",
  "+third_party/test_fonts",
  # JSON Deserialization.
  "+third_party/rust/serde_json_lenient/v0_1/wrapper",

  # These are implicitly brought in from the root, and we don't want them.
  "-ipc",
  "-url",

  # ICU dependencies must be separate from the rest of base.
  "-i18n",

  # //base/util can use //base but not vice versa.
  "-util",
]

specific_include_rules = {
  # Special case
  "process/current_process(|_test)\.h": [
    "+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
  ],
  # To evaluate the performance effects of using absl's flat_hash_map.
  "supports_user_data\.h": [
    "+third_party/abseil-cpp/absl/container/flat_hash_map.h",
  ]
}
src/base/DIR_METADATA (new file, 6 lines)
@@ -0,0 +1,6 @@
monorail: {
  component: "Internals>Core"
}
buganizer_public: {
  component_id: 1456128
}
src/base/OWNERS (new file, 53 lines)
@@ -0,0 +1,53 @@
# See //base/README.md to find qualification for being an owner.

set noparent
# NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
altimin@chromium.org
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
kylechar@chromium.org
mark@chromium.org
pkasting@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with global-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.

# per-file rules:
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*

# For Android-specific changes:
per-file ..._android*=file://base/android/OWNERS

# For Fuchsia-specific changes:
per-file ..._fuchsia*=file://build/fuchsia/OWNERS

# For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS

per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org

# Logging-related changes:
per-file check*=olivierli@chromium.org
per-file check*=pbos@chromium.org
per-file dcheck*=olivierli@chromium.org
per-file dcheck*=pbos@chromium.org
per-file logging*=olivierli@chromium.org
per-file logging*=pbos@chromium.org
per-file notimplemented.h=olivierli@chromium.org
per-file notimplemented.h=pbos@chromium.org
per-file notreached.h=olivierli@chromium.org
per-file notreached.h=pbos@chromium.org

# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS

per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS
src/base/PRESUBMIT.py (new file, 159 lines)
@@ -0,0 +1,159 @@
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Chromium presubmit script for src/base.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""


def CheckChangeLintsClean(input_api, output_api):
  """Makes sure that the code is cpplint clean."""
  # lint_filters=[] stops the OFF_BY_DEFAULT_LINT_FILTERS from being disabled,
  # finding many more issues. verbose_level=1 finds a small number of additional
  # issues.
  # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
  # Only process those extensions which are used in Chromium, in directories
  # that currently lint clean.
  CLEAN_CPP_FILES_ONLY = (r'base/win/.*\.(cc|h)$', )
  source_file_filter = lambda x: input_api.FilterSourceFile(
      x,
      files_to_check=CLEAN_CPP_FILES_ONLY,
      files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
  return input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, source_file_filter=source_file_filter,
      lint_filters=[], verbose_level=1)


def _CheckNoInterfacesInBase(input_api, output_api):
  """Checks to make sure no files in libbase.a have |@interface|."""
  pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
  files = []
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if (f.LocalPath().startswith('base/') and
        not "/ios/" in f.LocalPath() and
        not "/test/" in f.LocalPath() and
        not f.LocalPath().endswith('.java') and
        not f.LocalPath().endswith('_unittest.mm') and
        not f.LocalPath().endswith('_spi.h')):
      contents = input_api.ReadFile(f)
      if pattern.search(contents):
        files.append(f)

  if len(files):
    return [ output_api.PresubmitError(
        'Objective-C interfaces or categories are forbidden in libbase. ' +
        'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
        'browse_thread/thread/efb28c10435987fd',
        files) ]
  return []


def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
  """Returns locations matching one of the search_regexes."""
  def FilterFile(affected_file):
    return input_api.FilterSourceFile(
        affected_file,
        files_to_check=files_to_check,
        files_to_skip=files_to_skip)

  no_presubmit = r"// no-presubmit-check"
  locations = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      for search_regex in search_regexes:
        if (input_api.re.search(search_regex, line) and
            not input_api.re.search(no_presubmit, line)):
          locations.append("  %s:%d" % (f.LocalPath(), line_num))
          break
  return locations


def _CheckNoTraceEventInclude(input_api, output_api):
  """Verify that //base includes base_tracing.h instead of trace event headers.

  Checks that files outside trace event implementation include the
  base_tracing.h header instead of specific trace event implementation headers
  to maintain compatibility with the gn flag "enable_base_tracing = false".
  """
  discouraged_includes = [
      r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
      r'^#include "third_party/perfetto/include/',
  ]

  files_to_check = [
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*/test/.*",
      r".*/trace_event/.*",
      r".*/tracing/.*",
  ]

  locations = _FindLocations(input_api, discouraged_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitError(
        'Base code should include "base/trace_event/base_tracing.h" instead\n' +
        'of trace_event implementation headers. If you need to include an\n' +
        'implementation header, verify that "gn check" and base_unittests\n' +
        'still pass with gn arg "enable_base_tracing = false" and add\n' +
        '"// no-presubmit-check" after the include. \n' +
        '\n'.join(locations)) ]
  return []


def _WarnPbzeroIncludes(input_api, output_api):
  """Warn to check enable_base_tracing=false when including a pbzero header.

  Emits a warning when including a perfetto pbzero header, encouraging the
  user to verify that //base still builds with enable_base_tracing=false.
  """
  warn_includes = [
      r'^#include "third_party/perfetto/protos/',
      r'^#include "base/tracing/protos/',
  ]

  files_to_check = [
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*/test/.*",
      r".*/trace_event/.*",
      r".*/tracing/.*",
  ]

  locations = _FindLocations(input_api, warn_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitPromptWarning(
        'Please verify that "gn check" and base_unittests still pass with\n' +
        'gn arg "enable_base_tracing = false" when adding typed trace\n' +
        'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
        'to exclude pbzero headers and anything not supported by\n' +
        '//base/trace_event/trace_event_stub.h.\n' +
        '\n'.join(locations)) ]
  return []


def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  results.extend(_CheckNoInterfacesInBase(input_api, output_api))
  results.extend(_CheckNoTraceEventInclude(input_api, output_api))
  results.extend(_WarnPbzeroIncludes(input_api, output_api))
  results.extend(CheckChangeLintsClean(input_api, output_api))
  return results


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results


def CheckChangeOnCommit(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results
src/base/README.md (new file, 87 lines)
@@ -0,0 +1,87 @@
# What is this
Contains a written-down set of principles and other information on //base.
Please add to it!

## About //base:

Chromium is a very mature project. Most things that are generally useful are
already here, and things not here aren't generally useful.

The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.

Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.

If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.

base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.

## Qualifications for being in //base OWNERS
  * interest and ability to learn low level/high detail/complex c++ stuff
  * inclination to always ask why and understand everything (including external
    interactions like win32) rather than just hoping the author did it right
  * mentorship/experience
  * demonstrated good judgement (esp with regards to public APIs) over a length
    of time

Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.

## Design and naming
  * Be sure to use the base namespace.
  * STL-like constructs should adhere as closely to STL as possible. Functions
    and behaviors not present in STL should only be added when they are related
    to the specific data structure implemented by the container.
  * For STL-like constructs our policy is that they should use STL-like naming
    even when it may conflict with the style guide. So functions and class names
    should be lower case with underscores. Non-STL-like classes and functions
    should use Google naming.

## Performance testing

Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:

  * BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
    queue in isolation.
  * ConditionVariablePerfTest: Measures thread switching cost of condition
    variables.
  * IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
    machinery.
  * JSONPerfTest: Tests JSONWriter and JSONReader performance.
  * MessageLoopPerfTest: Measures the speed of task posting in various
    configurations.
  * ObserverListPerfTest: Exercises adding, removing and signalling observers.
  * PartitionLockPerfTest: Tests the implementation of Lock used in
    PartitionAlloc.
  * PthreadEventPerfTest: Establishes the baseline thread switching cost using
    pthreads.
  * RandUtilPerfTest: Measures the time it takes to generate random numbers.
  * ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
  * SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
    underlying task runners.
  * TaskObserverPerfTest: Measures the incremental cost of adding task
    observers.
  * TaskPerfTest: Checks the cost of posting tasks between threads.
  * ThreadLocalStoragePerfTest: Exercises different mechanisms for accessing
    data associated with the current thread (C++ `thread_local`, the
    implementation in //base, the POSIX/WinAPI directly).
  * WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
    multithreaded scenarios.

Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes, or 3) changes in //base code
itself.
src/base/SECURITY_OWNERS (new file, 13 lines)
@@ -0,0 +1,13 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base; OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org
src/base/allocator/BUILD.gn (new file, 31 lines)
@@ -0,0 +1,31 @@
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")

buildflag_header("buildflags") {
  header = "buildflags.h"

  flags =
      [ "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support" ]
}

if (is_apple) {
  source_set("early_zone_registration_apple") {
    sources = [
      "early_zone_registration_apple.cc",
      "early_zone_registration_apple.h",
      "partition_allocator/src/partition_alloc/shim/early_zone_registration_constants.h",
    ]

    deps = [
      ":buildflags",
      "//base/allocator/partition_allocator:buildflags",
    ]
  }
}
src/base/allocator/DIR_METADATA (new file, 6 lines)
@@ -0,0 +1,6 @@
monorail: {
  component: "Internals"
}
buganizer_public: {
  component_id: 1456292
}
src/base/allocator/OWNERS (new file, 7 lines)
@@ -0,0 +1,7 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org

per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS
src/base/allocator/README.md (new file, 155 lines)
@@ -0,0 +1,155 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.

Bear in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
 - Large parts of the renderer (Blink) use two home-brewed allocators,
   PartitionAlloc and BlinkGC (Oilpan).
 - Some subsystems, such as the V8 JavaScript engine, handle memory management
   autonomously.
 - Various parts of the codebase use abstractions such as `SharedMemory` or
   `DiscardableMemory` which, similarly to the above, have their own page-level
   memory management.

Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra hooks which service calls to malloc/new. The relevant
build-time flags involved are `use_allocator_shim` and
`use_partition_alloc_as_malloc`.

By default, these are true on all platforms except iOS (not yet supported) and
NaCl (no plan to support).
Furthermore, when building with a sanitizer (e.g. `asan`, `msan`, ...) both the
allocator and the shim layer are disabled.


Layering and build deps
-----------------------
The `allocator` target provides the linker flags required for the Windows shim
layer. The `base` target is (almost) the only one depending on `allocator`. No
other targets should depend on it, with the exception of the very few
executables / dynamic libraries that don't depend, either directly or
indirectly, on `base` within the scope of a linker unit.

More importantly, **no other place outside of `/base` should depend on the
specific allocator**.
If such a functional dependency is required, it should be achieved using
abstractions in `base` (see `/base/memory/`).

**Why does `base` depend on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.

Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) automatically get the correct
set of linker flags to pull in the Windows shim layer (if needed).


Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.


Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
a central place.

 - Full documentation: [Allocator shim design doc][url-allocator-shim].
 - Current state: Available and enabled by default on Android, CrOS, Linux,
   Mac OS and Windows.
 - Tracking bug: [crbug.com/550886](https://crbug.com/550886).
 - Build-time flag: `use_allocator_shim`.

**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+    +-----------------------+    +----------------+
|     malloc & friends    | -> |       shim layer      | -> |   Routing to   |
|    symbols definition   |    |     implementation    |    |    allocator   |
+-------------------------+    +-----------------------+    +----------------+
| - libc symbols (malloc, |    | - Security checks     |    | - glibc        |
|   calloc, free, ...)    |    | - Chain of dispatchers|    | - Android      |
| - C++ symbols (operator |    |   that can intercept  |    |   bionic       |
|   new, delete, ...)     |    |   and override        |    | - WinHeap      |
| - glibc weak symbols    |    |   allocations         |    | - Partition    |
|   (__libc_malloc, ...)  |    +-----------------------+    |   Alloc        |
+-------------------------+                                 +----------------+
```

**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.

*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols that we
can override in `allocator_shim_override_ucrt_symbols_win.h`.

*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third party libraries. Symbol resolution on Linux is a
breadth-first search that starts from the root link unit, that is the
executable (see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats
Specification).
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).

*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case, the approach is instead to wrap symbol resolution at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
The use of this wrapping flag causes:
 - All references to allocator symbols in the Chrome codebase to be rewritten as
   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
   route allocator calls inside the shim layer.
 - The references to the original `malloc` symbols (which typically are defined
   by the system's libc.so) are accessible via the special `__real_malloc` and
   friends symbols (which will be relocated, at load time, against `malloc`).

In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
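
The wrapped-symbol mechanism can be illustrated in isolation. The following is
an editor-added sketch, not Chrome's actual shim (which lives in
`allocator_shim_override_linker_wrapped_symbols.h`); it assumes a link unit
built with `-Wl,-wrap,malloc`:

```cpp
#include <cstddef>

// Resolved at load time against the libc malloc.
extern "C" void* __real_malloc(size_t size);

// The linker rewrites every malloc call site in the wrapped link unit to call
// this symbol instead of malloc.
extern "C" void* __wrap_malloc(size_t size) {
  // An interposer can observe or adjust the request here before delegating.
  return __real_malloc(size);
}
```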

**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
 - A singly linked list of dispatchers (structs with function pointers to
   `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
   (using the `InsertAllocatorDispatch` API). They can intercept and override
   allocator calls.
 - The security checks (suicide on malloc-failure via `std::new_handler`,
   etc.).
This happens inside `allocator_shim.cc`.
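
As a rough usage sketch (editor-added; the exact `AllocatorDispatch` fields and
hook signatures live in `partition_alloc/shim/allocator_shim.h` and are
abbreviated here as assumptions, so treat this as API-shaped pseudocode):

```cpp
#include "partition_alloc/shim/allocator_shim.h"

// Hypothetical hook that forwards to the next dispatcher in the chain.
void* CountingAlloc(const allocator_shim::AllocatorDispatch* self,
                    size_t size,
                    void* context) {
  // Record the allocation somewhere that does not itself call malloc, then
  // delegate to the next entry in the singly linked chain.
  return self->next->alloc_function(self->next, size, context);
}

allocator_shim::AllocatorDispatch g_counting_dispatch = {
    &CountingAlloc,  // alloc_function; all other hooks elided in this sketch.
};

void InstallCountingHook() {
  // Splices the dispatcher at the head of the chain.
  allocator_shim::InsertAllocatorDispatch(&g_counting_dispatch);
}
```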

**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.


Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)

[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
src/base/allocator/allocator.gni (new file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/partition_allocator/partition_alloc.gni")

# Chromium-specific asserts. External embedders _may_ elect to use these
# features even without PA-E.
if (!use_partition_alloc_as_malloc) {
  # In theory, BackupRefPtr will work just fine without
  # PartitionAlloc-Everywhere, but its scope would be limited to partitions
  # that are invoked explicitly (not via malloc). These are only the Blink
  # partitions, where we currently don't even use raw_ptr<T>.
  assert(!enable_backup_ref_ptr_support,
         "Chromium does not use BRP without PA-E")

  # Pointer compression works only if all pointers are guaranteed to be
  # allocated by PA (in one of its core pools, to be precise). In theory,
  # this could be useful with partitions that are invoked explicitly. In
  # practice, the pointers we have in mind for compression (scoped_refptr<>,
  # unique_ptr<>) require PA-E.
  assert(!enable_pointer_compression_support,
         "Pointer compression likely doesn't make sense without PA-E")
}

assert(use_allocator_shim || !use_partition_alloc_as_malloc,
       "PartitionAlloc-Everywhere requires the allocator shim")
src/base/allocator/allocator_check.cc (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_check.h"

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(IS_WIN)
#include "partition_alloc/shim/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

#if BUILDFLAG(IS_APPLE)
#include "partition_alloc/shim/allocator_interception_apple.h"
#endif

namespace base::allocator {

bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
  // Set by allocator_shim_override_ucrt_symbols_win.h when the
  // shimmed _set_new_mode() is called.
  return allocator_shim::g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
    !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_ALLOCATOR_SHIM)
  // From allocator_interception_mac.mm.
  return allocator_shim::g_replaced_default_zone;
#else
  return true;
#endif
}

}  // namespace base::allocator
src/base/allocator/allocator_check.h (new file, 18 lines)
@@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

#include "base/base_export.h"

namespace base {
namespace allocator {

BASE_EXPORT bool IsAllocatorInitialized();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
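
A usage sketch for the declaration above (editor-added; the function name
`VerifyAllocatorSetup` is hypothetical, but a startup sanity check of this form
is what the function is intended for):

```cpp
#include "base/allocator/allocator_check.h"
#include "base/check.h"

// Crash early if the shim did not take effect, rather than silently running
// with an unshimmed allocator.
void VerifyAllocatorSetup() {
  CHECK(base::allocator::IsAllocatorInitialized());
}
```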
src/base/allocator/dispatcher/configuration.h (new file, 24 lines)
@@ -0,0 +1,24 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_

#include <cstddef>

namespace base::allocator::dispatcher::configuration {

// The maximum number of optional observers that may be present depending on
// command line parameters.
constexpr size_t kMaximumNumberOfOptionalObservers = 4;

// The total number of observers including mandatory and optional observers.
// The number of observers primarily affects performance at allocation time.
// The current value of 4 doesn't have hard evidence. Keep in mind that
// even a single observer can severely impact performance.
constexpr size_t kMaximumNumberOfObservers = 4;

}  // namespace base::allocator::dispatcher::configuration

#endif  // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
src/base/allocator/dispatcher/dispatcher.cc (new file, 114 lines)
@@ -0,0 +1,114 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/dispatcher.h"

#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/shim/allocator_shim.h"

#if DCHECK_IS_ON()
#include <atomic>
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"
#endif

namespace base::allocator::dispatcher {

// The private implementation of Dispatcher.
struct Dispatcher::Impl {
  void Initialize(const internal::DispatchData& dispatch_data) {
#if DCHECK_IS_ON()
    DCHECK(!is_initialized_check_flag_.test_and_set());
#endif

    dispatch_data_ = dispatch_data;
    ConnectToEmitters(dispatch_data_);
  }

  void Reset() {
#if DCHECK_IS_ON()
    DCHECK([&]() {
      auto const was_set = is_initialized_check_flag_.test_and_set();
      is_initialized_check_flag_.clear();
      return was_set;
    }());
#endif

    DisconnectFromEmitters(dispatch_data_);
    dispatch_data_ = {};
  }

 private:
  // Connect the hooks to the memory subsystem. In some cases, most notably
  // when we have no observers at all, the hooks will be invalid and must NOT
  // be connected. This way we prevent notifications when no observers are
  // present.
  static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator_shim::InsertAllocatorDispatch(allocator_dispatch);
    }
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC)
    {
      auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
      auto* const free_hook = dispatch_data.GetFreeObserverHook();
      if (allocation_hook && free_hook) {
        partition_alloc::PartitionAllocHooks::SetObserverHooks(allocation_hook,
                                                               free_hook);
      }
    }
#endif
  }

  static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator_shim::RemoveAllocatorDispatchForTesting(
          allocator_dispatch);  // IN-TEST
    }
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC)
    partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
  }

  // Information on the hooks.
  internal::DispatchData dispatch_data_;
#if DCHECK_IS_ON()
  // Indicator if the dispatcher has been initialized before.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
#else
  std::atomic_flag is_initialized_check_flag_;
#endif
#endif
};

Dispatcher::Dispatcher() : impl_(std::make_unique<Impl>()) {}

Dispatcher::~Dispatcher() = default;

Dispatcher& Dispatcher::GetInstance() {
  static base::NoDestructor<Dispatcher> instance;
  return *instance;
}

void Dispatcher::Initialize(const internal::DispatchData& dispatch_data) {
  impl_->Initialize(dispatch_data);
}

void Dispatcher::ResetForTesting() {
  impl_->Reset();
}
}  // namespace base::allocator::dispatcher
src/base/allocator/dispatcher/dispatcher.h (new file, 75 lines)
@@ -0,0 +1,75 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"

#include <memory>

namespace base::allocator::dispatcher {

namespace internal {
struct DispatchData;
}

// Dispatcher serves as the top level instance for managing the dispatch
// mechanism. The class instance manages connections to the various memory
// subsystems such as PartitionAlloc. To keep the public interface as lean as
// possible it uses a pimpl pattern.
class BASE_EXPORT Dispatcher {
 public:
  static Dispatcher& GetInstance();

  Dispatcher();

  // Initialize the dispatch mechanism with the given tuple of observers. The
  // observers must be valid (this is only DCHECKed internally at
  // initialization, but not verified further).
  // If Initialize is called multiple times, the first call wins. All later
  // invocations are silently ignored. Initialization is protected from
  // concurrent invocations. In case of concurrent accesses, the first one to
  // get the lock wins.
  // The dispatcher invokes the following functions on the observers:
  //   void OnAllocation(void* address,
  //                     size_t size,
  //                     AllocationSubsystem sub_system,
  //                     const char* type_name);
  //   void OnFree(void* address);
  //
  // Note: The dispatcher mechanism does NOT provide systematic protection
  // against recursive invocations. That is, observers which allocate memory
  // on the heap, e.g. through dynamically allocated containers or by using
  // the CHECK macro, are responsible for breaking these recursions!
  template <typename... ObserverTypes>
  void Initialize(const std::tuple<ObserverTypes...>& observers) {
    // Get the hooks for running these observers and pass them on to the
    // further initialization steps.
    Initialize(internal::GetNotificationHooks(observers));
  }

  // The following functions provide an interface to set up and tear down the
  // dispatcher when testing. This must NOT be used from production code since
  // the hooks cannot be removed reliably under all circumstances.
  template <typename ObserverType>
  void InitializeForTesting(ObserverType* observer) {
    Initialize(std::make_tuple(observer));
  }

  void ResetForTesting();

 private:
  // Structure and pointer to the private implementation.
  struct Impl;
  std::unique_ptr<Impl> const impl_;

  ~Dispatcher();

  void Initialize(const internal::DispatchData& dispatch_data);
};

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
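As a usage illustration (not part of the imported file): a client defines an observer exposing OnAllocation/OnFree and hands a tuple of observer pointers to the singleton. The observer class, counters and function name below are hypothetical; the OnAllocation/OnFree signatures follow the notification-data interface used by the hooks and observer mocks elsewhere in this import.

#include <atomic>
#include <cstddef>
#include <tuple>

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/notification_data.h"

// Hypothetical observer counting allocation and free events.
class CountingObserver {
 public:
  void OnAllocation(
      const base::allocator::dispatcher::AllocationNotificationData&) {
    allocation_count_.fetch_add(1, std::memory_order_relaxed);
  }
  void OnFree(const base::allocator::dispatcher::FreeNotificationData&) {
    free_count_.fetch_add(1, std::memory_order_relaxed);
  }

 private:
  // Hooks run concurrently on all threads, hence atomic counters.
  std::atomic<size_t> allocation_count_{0};
  std::atomic<size_t> free_count_{0};
};

void InstallCountingObserver() {
  static CountingObserver observer;  // must outlive all allocations
  base::allocator::dispatcher::Dispatcher::GetInstance().Initialize(
      std::make_tuple(&observer));
}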
205
src/base/allocator/dispatcher/initializer.h
Normal file
@@ -0,0 +1,205 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_

#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"

#include <tuple>
#include <utility>

namespace base::allocator::dispatcher {
namespace internal {

// Filter the passed observers and perform initialization of the passed
// dispatcher.
template <size_t CurrentIndex,
          typename DispatcherType,
          typename CheckObserverPredicate,
          typename VerifiedObservers,
          typename UnverifiedObservers,
          size_t... IndicesToSelect>
inline void DoInitialize(DispatcherType& dispatcher,
                         CheckObserverPredicate check_observer,
                         const VerifiedObservers& verified_observers,
                         const UnverifiedObservers& unverified_observers,
                         std::index_sequence<IndicesToSelect...> indices) {
  if constexpr (CurrentIndex < std::tuple_size_v<UnverifiedObservers>) {
    // We still have some items left to handle.
    if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
      // The current observer is valid. Hence, append the index of the current
      // item to the set of indices and head on to the next item.
      DoInitialize<CurrentIndex + 1>(
          dispatcher, check_observer, verified_observers, unverified_observers,
          std::index_sequence<IndicesToSelect..., CurrentIndex>{});
    } else {
      // The current observer is not valid. Hence, head on to the next item
      // with an unaltered list of indices.
      DoInitialize<CurrentIndex + 1>(dispatcher, check_observer,
                                     verified_observers, unverified_observers,
                                     indices);
    }
  } else if constexpr (CurrentIndex == std::tuple_size_v<UnverifiedObservers>) {
    // We have reached the end of the tuple of observers to verify.
    // Hence, we extract the additional valid observers, append them to the
    // tuple of already verified observers and hand over to the dispatcher.
    auto observers = std::tuple_cat(
        verified_observers,
        std::make_tuple(std::get<IndicesToSelect>(unverified_observers)...));

    // Do a final check that neither the maximum total number of observers nor
    // the maximum number of optional observers is exceeded.
    static_assert(std::tuple_size_v<decltype(observers)> <=
                  configuration::kMaximumNumberOfObservers);
    static_assert(sizeof...(IndicesToSelect) <=
                  configuration::kMaximumNumberOfOptionalObservers);

    dispatcher.Initialize(std::move(observers));
  }
}

}  // namespace internal

// The result of concatenating several tuple types.
template <typename... tuples>
using TupleCat = decltype(std::tuple_cat(std::declval<tuples>()...));

// Initializer collects mandatory and optional observers and initializes the
// passed Dispatcher with only the enabled observers.
//
// In some situations, the presence of observers depends on runtime conditions,
// e.g. command line parameters or CPU features. With 3 optional observers we
// already have 8 different combinations. Initializer takes the job of dealing
// with all combinations off the user. It allows users to pass all observers
// (including nullptr for disabled optional observers) and initializes the
// Dispatcher with only the enabled observers.
//
// Since this process results in a combinatoric explosion, Initializer
// distinguishes between optional and mandatory observers. Mandatory observers
// are not included in the filtering process and must always be enabled (not
// nullptr).
//
// To allow the Initializer to track the number and exact type of observers, it
// is implemented as a templated class which holds information on the types in
// the std::tuples passed as template parameters. Therefore, whenever any
// observer is set, the Initializer changes its type to reflect this.
template <typename MandatoryObservers = std::tuple<>,
          typename OptionalObservers = std::tuple<>>
struct BASE_EXPORT Initializer {
  Initializer() = default;
  Initializer(MandatoryObservers mandatory_observers,
              OptionalObservers optional_observers)
      : mandatory_observers_(std::move(mandatory_observers)),
        optional_observers_(std::move(optional_observers)) {}

  // Set the mandatory observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfObservers.
  template <typename... NewMandatoryObservers,
            std::enable_if_t<
                internal::LessEqual((sizeof...(NewMandatoryObservers) +
                                     std::tuple_size_v<OptionalObservers>),
                                    configuration::kMaximumNumberOfObservers),
                bool> = true>
  Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
  SetMandatoryObservers(NewMandatoryObservers*... mandatory_observers) const {
    return {std::make_tuple(mandatory_observers...), GetOptionalObservers()};
  }

  // Add mandatory observers. The number of observers that can be added is
  // limited by the current number of observers, see
  // configuration::kMaximumNumberOfObservers.
  template <typename... AdditionalMandatoryObservers,
            std::enable_if_t<internal::LessEqual(
                                 std::tuple_size_v<MandatoryObservers> +
                                     sizeof...(AdditionalMandatoryObservers) +
                                     std::tuple_size_v<OptionalObservers>,
                                 configuration::kMaximumNumberOfObservers),
                             bool> = true>
  Initializer<TupleCat<MandatoryObservers,
                       std::tuple<AdditionalMandatoryObservers*...>>,
              OptionalObservers>
  AddMandatoryObservers(
      AdditionalMandatoryObservers*... additional_mandatory_observers) const {
    return {std::tuple_cat(GetMandatoryObservers(),
                           std::make_tuple(additional_mandatory_observers...)),
            GetOptionalObservers()};
  }

  // Set the optional observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... NewOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              sizeof...(NewOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((sizeof...(NewOptionalObservers) +
                                   std::tuple_size_v<MandatoryObservers>),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
  SetOptionalObservers(NewOptionalObservers*... optional_observers) const {
    return {GetMandatoryObservers(), std::make_tuple(optional_observers...)};
  }

  // Add optional observers. The number of observers that can be added is
  // limited by the current number of optional observers,
  // configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... AdditionalOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              std::tuple_size_v<OptionalObservers> +
                  sizeof...(AdditionalOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((std::tuple_size_v<OptionalObservers> +
                                   sizeof...(AdditionalOptionalObservers) +
                                   std::tuple_size_v<MandatoryObservers>),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<
      MandatoryObservers,
      TupleCat<OptionalObservers, std::tuple<AdditionalOptionalObservers*...>>>
  AddOptionalObservers(
      AdditionalOptionalObservers*... additional_optional_observers) const {
    return {GetMandatoryObservers(),
            std::tuple_cat(GetOptionalObservers(),
                           std::make_tuple(additional_optional_observers...))};
  }

  // Perform the actual initialization on the passed dispatcher.
  // The dispatcher is passed as a template only to provide better testability.
  template <typename DispatcherType>
  void DoInitialize(DispatcherType& dispatcher) const {
    internal::DoInitialize<0>(dispatcher, internal::IsValidObserver{},
                              GetMandatoryObservers(), GetOptionalObservers(),
                              {});
  }

  const MandatoryObservers& GetMandatoryObservers() const {
    return mandatory_observers_;
  }

  const OptionalObservers& GetOptionalObservers() const {
    return optional_observers_;
  }

 private:
  MandatoryObservers mandatory_observers_;
  OptionalObservers optional_observers_;
};

// Convenience function for creating an empty Initializer.
inline Initializer<> CreateInitializer() {
  return {};
}

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
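A sketch of the intended call pattern (ProfilerA, ProfilerB and the function name are hypothetical): mandatory observers are passed as-is, optional ones may be nullptr and are filtered out at runtime before the Dispatcher is initialized.

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/initializer.h"

void InitializeAllocationObservation(ProfilerA* cpu_profiler,
                                     ProfilerB* heap_profiler_or_null) {
  // heap_profiler_or_null may be nullptr, e.g. when disabled via command
  // line; DoInitialize() drops it so that the Dispatcher only ever sees
  // enabled observers.
  base::allocator::dispatcher::CreateInitializer()
      .SetMandatoryObservers(cpu_profiler)
      .SetOptionalObservers(heap_profiler_or_null)
      .DoInitialize(base::allocator::dispatcher::Dispatcher::GetInstance());
}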
42
src/base/allocator/dispatcher/internal/dispatch_data.cc
Normal file
@@ -0,0 +1,42 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "partition_alloc/partition_alloc_buildflags.h"

namespace base::allocator::dispatcher::internal {

#if BUILDFLAG(USE_PARTITION_ALLOC)

DispatchData& DispatchData::SetAllocationObserverHooks(
    AllocationObserverHook* allocation_observer_hook,
    FreeObserverHook* free_observer_hook) {
  allocation_observer_hook_ = allocation_observer_hook;
  free_observer_hook_ = free_observer_hook;

  return *this;
}

DispatchData::AllocationObserverHook* DispatchData::GetAllocationObserverHook()
    const {
  return allocation_observer_hook_;
}

DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
  return free_observer_hook_;
}
#endif

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& DispatchData::SetAllocatorDispatch(
    AllocatorDispatch* allocator_dispatch) {
  allocator_dispatch_ = allocator_dispatch;
  return *this;
}

AllocatorDispatch* DispatchData::GetAllocatorDispatch() const {
  return allocator_dispatch_;
}
#endif
}  // namespace base::allocator::dispatcher::internal
58
src/base/allocator/dispatcher/internal/dispatch_data.h
Normal file
@@ -0,0 +1,58 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_

#include "base/base_export.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_hooks.h"
#endif

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h"
#endif

namespace base::allocator::dispatcher::internal {

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif

// A simple utility class to pass all the information required to properly hook
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
struct BASE_EXPORT DispatchData {
#if BUILDFLAG(USE_PARTITION_ALLOC)
  using AllocationObserverHook =
      partition_alloc::PartitionAllocHooks::AllocationObserverHook;
  using FreeObserverHook =
      partition_alloc::PartitionAllocHooks::FreeObserverHook;

  DispatchData& SetAllocationObserverHooks(AllocationObserverHook*,
                                           FreeObserverHook*);
  AllocationObserverHook* GetAllocationObserverHook() const;
  FreeObserverHook* GetFreeObserverHook() const;

 private:
  AllocationObserverHook* allocation_observer_hook_ = nullptr;
  FreeObserverHook* free_observer_hook_ = nullptr;

 public:
#endif

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
  AllocatorDispatch* GetAllocatorDispatch() const;

 private:
  AllocatorDispatch* allocator_dispatch_ = nullptr;
#endif
};

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
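DispatchData's setters return *this, a small fluent-interface touch that lets a caller (such as CreateDispatchData() in dispatcher_internal.h below) chain the optional pieces between #if blocks without naming a temporary. A self-contained sketch of that pattern with purely illustrative names:

// Each setter returns *this so optional parts can be chained, or appended
// conditionally without naming intermediate temporaries.
struct Settings {
  Settings& SetHookCount(int count) {
    hook_count = count;
    return *this;
  }
  Settings& SetName(const char* n) {
    name = n;
    return *this;
  }
  int hook_count = 0;
  const char* name = nullptr;
};

Settings MakeSettings(bool with_name) {
  Settings settings = Settings().SetHookCount(2);
  if (with_name) {
    settings.SetName("example");
  }
  return settings;
}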
376
src/base/allocator/dispatcher/internal/dispatcher_internal.h
Normal file
@@ -0,0 +1,376 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_

#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/notification_data.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_allocation_data.h"
#endif

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "partition_alloc/shim/allocator_shim.h"
#endif

#include <tuple>

namespace base::allocator::dispatcher::internal {

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator_shim::AllocatorDispatch;
#endif

template <typename CheckObserverPredicate,
          typename... ObserverTypes,
          size_t... Indices>
inline void PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
                                 std::index_sequence<Indices...>,
                                 CheckObserverPredicate check_observer) {
  ([](bool b) { DCHECK(b); }(check_observer(std::get<Indices>(observers))),
   ...);
}

template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformAllocationNotification(
    const std::tuple<ObserverTypes...>& observers,
    std::index_sequence<Indices...>,
    const AllocationNotificationData& notification_data) {
  ((std::get<Indices>(observers)->OnAllocation(notification_data)), ...);
}

template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformFreeNotification(
    const std::tuple<ObserverTypes...>& observers,
    std::index_sequence<Indices...>,
    const FreeNotificationData& notification_data) {
  ((std::get<Indices>(observers)->OnFree(notification_data)), ...);
}

// DispatcherImpl provides hooks into the various memory subsystems. These
// hooks are responsible for dispatching any notification to the observers.
// To provide as much information as possible on the exact types of the
// observers and to prevent any conditional jumps in the hot allocation path,
// observers are stored in a std::tuple. DispatcherImpl performs a CHECK at
// initialization time to ensure they are valid.
template <typename... ObserverTypes>
struct DispatcherImpl {
  using AllObservers = std::index_sequence_for<ObserverTypes...>;

  template <std::enable_if_t<
                internal::LessEqual(sizeof...(ObserverTypes),
                                    configuration::kMaximumNumberOfObservers),
                bool> = true>
  static DispatchData GetNotificationHooks(
      std::tuple<ObserverTypes*...> observers) {
    s_observers = std::move(observers);

    PerformObserverCheck(s_observers, AllObservers{}, IsValidObserver{});

    return CreateDispatchData();
  }

 private:
  static DispatchData CreateDispatchData() {
    return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
        .SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
                                    &PartitionAllocatorFreeHook)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
        .SetAllocatorDispatch(&allocator_dispatch_)
#endif
        ;
  }

#if BUILDFLAG(USE_PARTITION_ALLOC)
  static void PartitionAllocatorAllocationHook(
      const partition_alloc::AllocationNotificationData& pa_notification_data) {
    AllocationNotificationData dispatcher_notification_data(
        pa_notification_data.address(), pa_notification_data.size(),
        pa_notification_data.type_name(),
        AllocationSubsystem::kPartitionAllocator);

#if BUILDFLAG(HAS_MEMORY_TAGGING)
    dispatcher_notification_data.SetMteReportingMode(
        ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif

    DoNotifyAllocation(dispatcher_notification_data);
  }

  static void PartitionAllocatorFreeHook(
      const partition_alloc::FreeNotificationData& pa_notification_data) {
    FreeNotificationData dispatcher_notification_data(
        pa_notification_data.address(),
        AllocationSubsystem::kPartitionAllocator);

#if BUILDFLAG(HAS_MEMORY_TAGGING)
    dispatcher_notification_data.SetMteReportingMode(
        ConvertToMTEMode(pa_notification_data.mte_reporting_mode()));
#endif

    DoNotifyFree(dispatcher_notification_data);
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC)

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  static void* AllocFn(const AllocatorDispatch* self,
                       size_t size,
                       void* context) {
    void* const address = self->next->alloc_function(self->next, size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AllocUncheckedFn(const AllocatorDispatch* self,
                                size_t size,
                                void* context) {
    void* const address =
        self->next->alloc_unchecked_function(self->next, size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                                      size_t n,
                                      size_t size,
                                      void* context) {
    void* const address = self->next->alloc_zero_initialized_function(
        self->next, n, size, context);

    DoNotifyAllocationForShim(address, n * size);

    return address;
  }

  static void* AllocAlignedFn(const AllocatorDispatch* self,
                              size_t alignment,
                              size_t size,
                              void* context) {
    void* const address = self->next->alloc_aligned_function(
        self->next, alignment, size, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* ReallocFn(const AllocatorDispatch* self,
                         void* address,
                         size_t size,
                         void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    void* const reallocated_address =
        self->next->realloc_function(self->next, address, size, context);

    DoNotifyAllocationForShim(reallocated_address, size);

    return reallocated_address;
  }

  static void FreeFn(const AllocatorDispatch* self,
                     void* address,
                     void* context) {
    // Note: DoNotifyFree should be called before free_function (here and in
    // other places). That is because observers need to handle the allocation
    // being freed before calling free_function, as once the latter is executed
    // the address becomes available and can be allocated by another thread.
    // That would be racy otherwise.
    DoNotifyFreeForShim(address);
    self->next->free_function(self->next, address, context);
  }

  static size_t GetSizeEstimateFn(const AllocatorDispatch* self,
                                  void* address,
                                  void* context) {
    return self->next->get_size_estimate_function(self->next, address, context);
  }

  static size_t GoodSizeFn(const AllocatorDispatch* self,
                           size_t size,
                           void* context) {
    return self->next->good_size_function(self->next, size, context);
  }

  static bool ClaimedAddressFn(const AllocatorDispatch* self,
                               void* address,
                               void* context) {
    return self->next->claimed_address_function(self->next, address, context);
  }

  static unsigned BatchMallocFn(const AllocatorDispatch* self,
                                size_t size,
                                void** results,
                                unsigned num_requested,
                                void* context) {
    unsigned const num_allocated = self->next->batch_malloc_function(
        self->next, size, results, num_requested, context);
    for (unsigned i = 0; i < num_allocated; ++i) {
      DoNotifyAllocationForShim(results[i], size);
    }
    return num_allocated;
  }

  static void BatchFreeFn(const AllocatorDispatch* self,
                          void** to_be_freed,
                          unsigned num_to_be_freed,
                          void* context) {
    for (unsigned i = 0; i < num_to_be_freed; ++i) {
      DoNotifyFreeForShim(to_be_freed[i]);
    }

    self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
                                    context);
  }

  static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 void* context) {
    DoNotifyFreeForShim(address);
    self->next->free_definite_size_function(self->next, address, size, context);
  }

  static void TryFreeDefaultFn(const AllocatorDispatch* self,
                               void* address,
                               void* context) {
    DoNotifyFreeForShim(address);
    self->next->try_free_default_function(self->next, address, context);
  }

  static void* AlignedMallocFn(const AllocatorDispatch* self,
                               size_t size,
                               size_t alignment,
                               void* context) {
    void* const address = self->next->aligned_malloc_function(
        self->next, size, alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void* AlignedReallocFn(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                size_t alignment,
                                void* context) {
    // Note: size == 0 actually performs free.
    DoNotifyFreeForShim(address);
    address = self->next->aligned_realloc_function(self->next, address, size,
                                                   alignment, context);

    DoNotifyAllocationForShim(address, size);

    return address;
  }

  static void AlignedFreeFn(const AllocatorDispatch* self,
                            void* address,
                            void* context) {
    DoNotifyFreeForShim(address);
    self->next->aligned_free_function(self->next, address, context);
  }

  ALWAYS_INLINE static void DoNotifyAllocationForShim(void* address,
                                                      size_t size) {
    AllocationNotificationData notification_data(
        address, size, nullptr, AllocationSubsystem::kAllocatorShim);

    DoNotifyAllocation(notification_data);
  }

  ALWAYS_INLINE static void DoNotifyFreeForShim(void* address) {
    FreeNotificationData notification_data(address,
                                           AllocationSubsystem::kAllocatorShim);

    DoNotifyFree(notification_data);
  }

  static AllocatorDispatch allocator_dispatch_;
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

  ALWAYS_INLINE static void DoNotifyAllocation(
      const AllocationNotificationData& notification_data) {
    PerformAllocationNotification(s_observers, AllObservers{},
                                  notification_data);
  }

  ALWAYS_INLINE static void DoNotifyFree(
      const FreeNotificationData& notification_data) {
    PerformFreeNotification(s_observers, AllObservers{}, notification_data);
  }

  static std::tuple<ObserverTypes*...> s_observers;
};

template <typename... ObserverTypes>
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
    &AllocFn,
    &AllocUncheckedFn,
    &AllocZeroInitializedFn,
    &AllocAlignedFn,
    &ReallocFn,
    &FreeFn,
    &GetSizeEstimateFn,
    &GoodSizeFn,
    &ClaimedAddressFn,
    &BatchMallocFn,
    &BatchFreeFn,
    &FreeDefiniteSizeFn,
    &TryFreeDefaultFn,
    &AlignedMallocFn,
    &AlignedReallocFn,
    &AlignedFreeFn,
    nullptr};
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

// Specialization of DispatcherImpl for the case where there are no observers
// to notify. In this special case we return a set of null pointers as the
// Dispatcher must not install any hooks at all.
template <>
struct DispatcherImpl<> {
  static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
    return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
        .SetAllocationObserverHooks(nullptr, nullptr)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
        .SetAllocatorDispatch(nullptr)
#endif
        ;
  }
};

// A small utility function that makes DispatcherImpl easier to use by
// providing automated template type deduction.
template <typename... ObserverTypes>
inline DispatchData GetNotificationHooks(
    std::tuple<ObserverTypes*...> observers) {
  return DispatcherImpl<ObserverTypes...>::GetNotificationHooks(
      std::move(observers));
}

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
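The Perform*Notification helpers above use a C++17 fold expression over an index sequence, so the compiler emits one direct, typically inlined call per observer, with no loop, no virtual dispatch and no conditional branch on the hot path. The same technique in a standalone, compilable form (all names here are illustrative):

#include <cstddef>
#include <cstdio>
#include <tuple>
#include <utility>

template <typename... Ts, std::size_t... Indices>
void NotifyAll(const std::tuple<Ts*...>& observers,
               std::index_sequence<Indices...>,
               int event) {
  // Expands at compile time to:
  //   std::get<0>(observers)->OnEvent(event),
  //   std::get<1>(observers)->OnEvent(event), ...
  ((std::get<Indices>(observers)->OnEvent(event)), ...);
}

struct PrintingObserver {
  void OnEvent(int event) { std::printf("event %d\n", event); }
};

int main() {
  PrintingObserver first, second;
  const auto observers = std::make_tuple(&first, &second);
  NotifyAll(observers,
            std::index_sequence_for<PrintingObserver, PrintingObserver>{}, 42);
}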
29
src/base/allocator/dispatcher/internal/tools.h
Normal file
@@ -0,0 +1,29 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_

#include <cstddef>

namespace base::allocator::dispatcher::internal {

constexpr bool LessEqual(size_t lhs, size_t rhs) {
  return lhs <= rhs;
}

constexpr bool Equal(size_t lhs, size_t rhs) {
  return lhs == rhs;
}

struct IsValidObserver {
  template <typename T>
  constexpr bool operator()(T const* ptr) const noexcept {
    return ptr != nullptr;
  }
};

}  // namespace base::allocator::dispatcher::internal

#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
20
src/base/allocator/dispatcher/memory_tagging.cc
Normal file
@@ -0,0 +1,20 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/memory_tagging.h"

namespace base::allocator::dispatcher {
static_assert(
    MTEMode::kUndefined ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kUndefined));
static_assert(
    MTEMode::kDisabled ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kDisabled));
static_assert(
    MTEMode::kSynchronous ==
    ConvertToMTEMode(partition_alloc::TagViolationReportingMode::kSynchronous));
static_assert(MTEMode::kAsynchronous ==
              ConvertToMTEMode(
                  partition_alloc::TagViolationReportingMode::kAsynchronous));
}  // namespace base::allocator::dispatcher
42
src/base/allocator/dispatcher/memory_tagging.h
Normal file
@@ -0,0 +1,42 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TAGGING_H_
#define BASE_ALLOCATOR_DISPATCHER_TAGGING_H_

#include "partition_alloc/tagging.h"

namespace base::allocator::dispatcher {
// The various modes of Arm's MTE extension. The enum values should match their
// counterparts in partition_alloc::TagViolationReportingMode, otherwise the
// conversion function below would require a translation table or conditional
// jumps.
enum class MTEMode {
  // Default settings
  kUndefined,
  // MTE explicitly disabled.
  kDisabled,
  // Precise tag violation reports, higher overhead. Good for unittests
  // and security critical threads.
  kSynchronous,
  // Imprecise tag violation reports (async mode). Lower overhead.
  kAsynchronous,
};

constexpr MTEMode ConvertToMTEMode(
    partition_alloc::TagViolationReportingMode pa_mte_reporting_mode) {
  switch (pa_mte_reporting_mode) {
    case partition_alloc::TagViolationReportingMode::kUndefined:
      return MTEMode::kUndefined;
    case partition_alloc::TagViolationReportingMode::kDisabled:
      return MTEMode::kDisabled;
    case partition_alloc::TagViolationReportingMode::kSynchronous:
      return MTEMode::kSynchronous;
    case partition_alloc::TagViolationReportingMode::kAsynchronous:
      return MTEMode::kAsynchronous;
  }
}

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_TAGGING_H_
110
src/base/allocator/dispatcher/notification_data.h
Normal file
@@ -0,0 +1,110 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_

#include <cstdint>

#include "base/allocator/dispatcher/memory_tagging.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"

namespace base::allocator::dispatcher {

// Definitions of the parameter structures passed to the observer hooks. They
// are similar to the structures defined by PartitionAllocator but provide
// further information.

// The notification data for the allocation path.
class BASE_EXPORT AllocationNotificationData {
 public:
  constexpr AllocationNotificationData(void* address,
                                       size_t size,
                                       const char* type_name,
                                       AllocationSubsystem allocation_subsystem)
      : address_(address),
        size_(size),
        type_name_(type_name),
        allocation_subsystem_(allocation_subsystem) {}

  constexpr void* address() const { return address_; }

  constexpr size_t size() const { return size_; }

  constexpr const char* type_name() const { return type_name_; }

  constexpr AllocationSubsystem allocation_subsystem() const {
    return allocation_subsystem_;
  }

  // In the allocation observer path, it's interesting which reporting mode is
  // enabled.
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  constexpr AllocationNotificationData& SetMteReportingMode(MTEMode mode) {
    mte_reporting_mode_ = mode;
    return *this;
  }
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

  constexpr MTEMode mte_reporting_mode() const {
#if BUILDFLAG(HAS_MEMORY_TAGGING)
    return mte_reporting_mode_;
#else
    return MTEMode::kUndefined;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
  }

 private:
  void* address_ = nullptr;
  size_t size_ = 0;
  const char* type_name_ = nullptr;
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
  AllocationSubsystem allocation_subsystem_;
};

// The notification data for the free path.
class BASE_EXPORT FreeNotificationData {
 public:
  constexpr explicit FreeNotificationData(
      void* address,
      AllocationSubsystem allocation_subsystem)
      : address_(address), allocation_subsystem_(allocation_subsystem) {}

  constexpr void* address() const { return address_; }

  constexpr AllocationSubsystem allocation_subsystem() const {
    return allocation_subsystem_;
  }

  // In the free observer path, it's interesting which reporting mode is
  // enabled.
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  constexpr FreeNotificationData& SetMteReportingMode(MTEMode mode) {
    mte_reporting_mode_ = mode;
    return *this;
  }
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

  constexpr MTEMode mte_reporting_mode() const {
#if BUILDFLAG(HAS_MEMORY_TAGGING)
    return mte_reporting_mode_;
#else
    return MTEMode::kUndefined;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
  }

 private:
  void* address_ = nullptr;
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  MTEMode mte_reporting_mode_ = MTEMode::kUndefined;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
  AllocationSubsystem allocation_subsystem_;
};

}  // namespace base::allocator::dispatcher
#endif  // BASE_ALLOCATOR_DISPATCHER_NOTIFICATION_DATA_H_
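For illustration (the function below is hypothetical, not part of the import): this is how a hook fills in the allocation-path structure, mirroring PartitionAllocatorAllocationHook in dispatcher_internal.h above. Note that mte_reporting_mode() degrades to MTEMode::kUndefined when memory-tagging support is compiled out.

#include "base/allocator/dispatcher/notification_data.h"
#include "base/allocator/dispatcher/subsystem.h"

using base::allocator::dispatcher::AllocationNotificationData;
using base::allocator::dispatcher::AllocationSubsystem;

void NotifyObserverOfAllocation(void* address, size_t size) {
  // type_name is only known to PartitionAlloc callers; shim-level hooks
  // pass nullptr.
  AllocationNotificationData data(address, size, /*type_name=*/nullptr,
                                  AllocationSubsystem::kAllocatorShim);
  // An observer would now read data.address(), data.size() and
  // data.mte_reporting_mode().
}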
61
src/base/allocator/dispatcher/reentry_guard.cc
Normal file
@@ -0,0 +1,61 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/reentry_guard.h"

#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif

namespace base::allocator::dispatcher {

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// pthread_key_t has different signedness on Mac and Android. Store the null
// value in a strongly-typed constant to avoid "comparison of integers of
// different signs" warnings when comparing with 0.
constexpr pthread_key_t kNullKey = 0;

pthread_key_t ReentryGuard::entered_key_ = kNullKey;

void ReentryGuard::InitTLSSlot() {
  if (entered_key_ == kNullKey) {
    int error = pthread_key_create(&entered_key_, nullptr);
    CHECK(!error);
    // Touch the TLS slot immediately to force any allocations.
    // TODO(https://crbug.com/1411454): Use this technique to avoid allocations
    // in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
    // ReentryGuard redundant.
    pthread_setspecific(entered_key_, nullptr);
  }

  DCHECK_NE(entered_key_, kNullKey);
}

#else

void ReentryGuard::InitTLSSlot() {}

#endif

void ReentryGuard::RecordTLSSlotToCrashKey() {
  // Record the key in crash dumps to detect when it's higher than 32
  // (PTHREAD_KEY_2NDLEVEL_SIZE).
  // TODO(crbug.com/1411454): Remove this after diagnosing reentry crashes.
  static auto* const crash_key = base::debug::AllocateCrashKeyString(
      "reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
  base::debug::SetCrashKeyString(crash_key, base::NumberToString(entered_key_));
#else
  base::debug::SetCrashKeyString(crash_key, "unused");
#endif
}

}  // namespace base::allocator::dispatcher
75
src/base/allocator/dispatcher/reentry_guard.h
Normal file
@@ -0,0 +1,75 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_

#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif

namespace base::allocator::dispatcher {

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)

// The macOS implementation of libmalloc sometimes calls malloc recursively,
// delegating allocations between zones. That causes our hooks to be called
// twice. The scoped guard allows us to detect that.
//
// Besides that, the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local
// variables (and on Android at least thread_local is implemented on top of
// pthread, so it is strictly worse for performance). Make use of pthread TLS
// instead of C++ thread_local there.
struct BASE_EXPORT ReentryGuard {
  ALWAYS_INLINE ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
    pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
  }

  ALWAYS_INLINE ~ReentryGuard() {
    if (LIKELY(allowed_))
      pthread_setspecific(entered_key_, nullptr);
  }

  explicit operator bool() const noexcept { return allowed_; }

  // This function must be called before installing any allocator hooks because
  // some TLS implementations may allocate (e.g. glibc will require a malloc
  // call to allocate storage for a higher slot number (>=
  // PTHREAD_KEY_2NDLEVEL_SIZE == 32)). This touches the thread-local storage
  // so that any malloc happens before installing the hooks.
  static void InitTLSSlot();

  // InitTLSSlot() is called before crash keys are available. At some point
  // after SetCrashKeyImplementation() is called, this function should be
  // called to record `entered_key_` to a crash key for debugging. This may
  // allocate so it must not be called from inside an allocator hook.
  static void RecordTLSSlotToCrashKey();

 private:
  static pthread_key_t entered_key_;
  const bool allowed_;
};

#else

// Use [[maybe_unused]] as this lightweight stand-in for the more heavyweight
// ReentryGuard above will otherwise trigger "unused code" warnings.
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
  constexpr explicit operator bool() const noexcept { return true; }

  static void InitTLSSlot();
  static void RecordTLSSlotToCrashKey();
};

#endif

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
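A sketch of the intended use inside an allocation hook (the hook function below is hypothetical): the guard converts to false when the current thread is already inside a hook, which breaks the recursion that arises when the observer itself allocates.

#include "base/allocator/dispatcher/reentry_guard.h"

void OnAllocationSampled(void* address, size_t size) {
  base::allocator::dispatcher::ReentryGuard guard;
  if (!guard) {
    // Already inside a hook on this thread: the observer's own bookkeeping
    // allocated memory. Bail out to avoid infinite recursion.
    return;
  }
  // ... record the sample. Allocations made here re-enter the hook, but
  // the nested invocation sees !guard and returns immediately.
}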
27
src/base/allocator/dispatcher/subsystem.h
Normal file
@@ -0,0 +1,27 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_

namespace base::allocator::dispatcher {

// Identifiers for the memory subsystem handling an allocation. Some observers,
// e.g. the SamplingHeapProfiler, require more detailed information on which
// subsystem is performing the allocation.
enum class AllocationSubsystem {
  // Allocation is handled by PartitionAllocator.
  kPartitionAllocator = 1,
  // Allocation is handled by AllocatorShims.
  kAllocatorShim = 2,
  // Represents a simulated allocation event during testing and is used to
  // filter out these allocations from real ones.
  //
  // Included for backward compatibility; this value becomes obsolete once the
  // old allocation hooks are removed from PoissonAllocationSampler.
  kManualForTesting = 3,
};
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
27
src/base/allocator/dispatcher/testing/dispatcher_test.h
Normal file
@@ -0,0 +1,27 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_

#include "testing/gtest/include/gtest/gtest.h"

namespace base::allocator::dispatcher::testing {

// DispatcherTest provides some common initialization which most of the
// unittests of the dispatcher require. DispatcherTest should not be used
// directly. Instead, derive your test fixture from it.
struct DispatcherTest : public ::testing::Test {
  // Performs some commonly required initialization; at the moment this is
  // - initializing the TLS slot for the ReentryGuard
  DispatcherTest();

 protected:
  // Protected d'tor only to prevent direct usage of this class.
  ~DispatcherTest() override;
};

}  // namespace base::allocator::dispatcher::testing

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
33
src/base/allocator/dispatcher/testing/observer_mock.h
Normal file
@@ -0,0 +1,33 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_

#include "testing/gmock/include/gmock/gmock.h"

namespace base::allocator::dispatcher {
class AllocationNotificationData;
class FreeNotificationData;

namespace testing {

// ObserverMock is a small mock class based on GoogleMock.
// It conforms to the interface enforced by the dispatcher. The template
// parameter serves only to create distinct types of observers if required.
template <typename T = void>
struct ObserverMock {
  MOCK_METHOD(void,
              OnAllocation,
              (const AllocationNotificationData& notification_data),
              ());
  MOCK_METHOD(void,
              OnFree,
              (const FreeNotificationData& notification_data),
              ());
};
}  // namespace testing
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
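A sketch of a test wiring ObserverMock into the Dispatcher (the fixture alias and test name are hypothetical; whether plain malloc is actually routed through the hooks depends on USE_ALLOCATOR_SHIM being enabled for the build):

#include <cstdlib>

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/testing/dispatcher_test.h"
#include "base/allocator/dispatcher/testing/observer_mock.h"

namespace base::allocator::dispatcher {

using DispatcherObserverTest = testing::DispatcherTest;

TEST_F(DispatcherObserverTest, ObserverSeesAllocations) {
  testing::ObserverMock<> observer;
  EXPECT_CALL(observer, OnAllocation(::testing::_))
      .Times(::testing::AtLeast(1));
  EXPECT_CALL(observer, OnFree(::testing::_)).Times(::testing::AnyNumber());

  Dispatcher::GetInstance().InitializeForTesting(&observer);
  void* memory = std::malloc(16);  // should pass through the installed hooks
  std::free(memory);
  Dispatcher::GetInstance().ResetForTesting();
}

}  // namespace base::allocator::dispatcher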
50
src/base/allocator/dispatcher/testing/tools.h
Normal file
@@ -0,0 +1,50 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_

#include <array>
#include <tuple>
#include <utility>

namespace base::allocator::dispatcher::testing {

namespace internal {
template <size_t Size, typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType {
  using type = typename DefineTupleFromSingleType<Size - 1,
                                                  Type,
                                                  AppendedTypes...,
                                                  Type>::type;
};

template <typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType<0, Type, AppendedTypes...> {
  using type = std::tuple<AppendedTypes...>;
};

}  // namespace internal

template <size_t Size, typename Type>
struct DefineTupleFromSingleType {
  using type = typename internal::DefineTupleFromSingleType<Size, Type>::type;
};

template <typename Type, size_t Size, size_t... Indices>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items,
                      std::index_sequence<Indices...>) {
  return std::make_tuple((&items[Indices])...);
}

template <typename Type, size_t Size>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items) {
  return CreateTupleOfPointers(items, std::make_index_sequence<Size>{});
}

}  // namespace base::allocator::dispatcher::testing

#endif  // BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
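For illustration (the function below is hypothetical): CreateTupleOfPointers turns an array of N observers into the std::tuple of N pointers that Dispatcher::Initialize() and the Initializer expect, keeping test code independent of the exact observer count.

#include <array>

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/testing/observer_mock.h"
#include "base/allocator/dispatcher/testing/tools.h"

void InitializeWithThreeMockObservers() {
  using base::allocator::dispatcher::testing::ObserverMock;

  static std::array<ObserverMock<>, 3> observers;
  // Yields std::tuple<ObserverMock<>*, ObserverMock<>*, ObserverMock<>*>.
  base::allocator::dispatcher::Dispatcher::GetInstance().Initialize(
      base::allocator::dispatcher::testing::CreateTupleOfPointers(observers));
}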
165
src/base/allocator/dispatcher/tls.cc
Normal file
@@ -0,0 +1,165 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/dispatcher/tls.h"

#if USE_LOCAL_TLS_EMULATION()

#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h"
#include "base/immediate_crash.h"
#include "build/build_config.h"

#include <sys/mman.h>

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#include <sys/prctl.h>
#endif

namespace base::allocator::dispatcher::internal {
namespace {

base::debug::CrashKeySize GetCrashKeySize(const std::string& crash_key_name) {
  if (std::size(crash_key_name) <= 32ul) {
    return base::debug::CrashKeySize::Size32;
  }
  if (std::size(crash_key_name) <= 64ul) {
    return base::debug::CrashKeySize::Size64;
  }
  if (std::size(crash_key_name) <= 256ul) {
    return base::debug::CrashKeySize::Size256;
  }
  CHECK(std::size(crash_key_name) <= 1024ul);

  return base::debug::CrashKeySize::Size1024;
}

#if DCHECK_IS_ON()
void Swap(std::atomic_bool& lh_op, std::atomic_bool& rh_op) {
  auto lh_op_value = lh_op.load(std::memory_order_relaxed);
  auto rh_op_value = rh_op.load(std::memory_order_relaxed);

  CHECK(lh_op.compare_exchange_strong(lh_op_value, rh_op_value));
  CHECK(rh_op.compare_exchange_strong(rh_op_value, lh_op_value));
}
#endif

}  // namespace

void* MMapAllocator::AllocateMemory(size_t size_in_bytes) {
  void* const mmap_res = mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
  if (mmap_res != MAP_FAILED) {
    // Allow the anonymous memory region allocated by mmap(MAP_ANONYMOUS) to
    // be identified in /proc/$PID/smaps. This helps improve visibility into
    // Chromium's memory usage on Android.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mmap_res, size_in_bytes,
          "tls-mmap-allocator");
  }
#endif
#endif

  return (mmap_res != MAP_FAILED) ? mmap_res : nullptr;
}

bool MMapAllocator::FreeMemoryForTesting(void* pointer_to_allocated,
                                         size_t size_in_bytes) {
  auto const munmap_res = munmap(pointer_to_allocated, size_in_bytes);
  return (munmap_res == 0);
}

PThreadTLSSystem::PThreadTLSSystem() = default;

PThreadTLSSystem::PThreadTLSSystem(PThreadTLSSystem&& other) {
  std::swap(crash_key_, other.crash_key_);
  std::swap(data_access_key_, other.data_access_key_);

#if DCHECK_IS_ON()
  Swap(initialized_, other.initialized_);
#endif
}

PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
  std::swap(crash_key_, other.crash_key_);
  std::swap(data_access_key_, other.data_access_key_);

#if DCHECK_IS_ON()
  Swap(initialized_, other.initialized_);
#endif

  return *this;
}

bool PThreadTLSSystem::Setup(
    OnThreadTerminationFunction thread_termination_function,
    const base::StringPiece instance_id) {
#if DCHECK_IS_ON()
  // Initialize must happen outside of the allocation path. Therefore, it is
  // safe to verify with DCHECK.
  DCHECK(!initialized_.exchange(true, std::memory_order_acq_rel));
#endif

  auto const key_create_res =
      pthread_key_create(&data_access_key_, thread_termination_function);

  // On some platforms creating a new pthread key requires an allocation once a
  // given number of keys has been created, e.g. in glibc this limit is denoted
  // by PTHREAD_KEY_2NDLEVEL_SIZE. However, this value is neither present on
  // all systems nor accessible from here. Hence, we do not do any checks here,
  // but we strongly recommend setting up the TLS system as early as possible
  // to avoid exceeding this limit.

  // Some crashes might be caused by the initialization being performed too
  // late and running into the problems mentioned above. Since there's no way
  // to handle this issue programmatically, we include the key in the crashpad
  // report to allow for later inspection.
  std::string crash_key_name = "tls_system-";
  crash_key_name += instance_id;

  crash_key_ = base::debug::AllocateCrashKeyString(
      crash_key_name.c_str(), GetCrashKeySize(crash_key_name));
  base::debug::SetCrashKeyString(crash_key_,
                                 base::NumberToString(data_access_key_));

  return (0 == key_create_res);
}

bool PThreadTLSSystem::TearDownForTesting() {
#if DCHECK_IS_ON()
  // TearDownForTesting must happen outside of the allocation path. Therefore,
  // it is safe to verify with DCHECK.
  DCHECK(initialized_.exchange(false, std::memory_order_acq_rel));
#endif

  base::debug::ClearCrashKeyString(crash_key_);
  crash_key_ = nullptr;

  auto const key_delete_res = pthread_key_delete(data_access_key_);
  return (0 == key_delete_res);
}

void* PThreadTLSSystem::GetThreadSpecificData() {
#if DCHECK_IS_ON()
  if (!initialized_.load(std::memory_order_acquire)) {
    return nullptr;
  }
#endif

  return pthread_getspecific(data_access_key_);
}

bool PThreadTLSSystem::SetThreadSpecificData(void* data) {
#if DCHECK_IS_ON()
  if (!initialized_.load(std::memory_order_acquire)) {
    return false;
  }
#endif

  return (0 == pthread_setspecific(data_access_key_, data));
}

}  // namespace base::allocator::dispatcher::internal

#endif  // USE_LOCAL_TLS_EMULATION()
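The pthread machinery above is easier to see in isolation. Below is a minimal, self-contained sketch of the pattern PThreadTLSSystem wraps (create a key with a termination callback, then get/set a per-thread pointer). The ThreadData type and the function names are illustrative, not part of the Chromium API.

#include <pthread.h>

#include <cstdio>

struct ThreadData {
  int counter = 0;
};

pthread_key_t g_key;

// Called by the pthread runtime when a thread that stored a non-null value
// terminates; this is the role OnThreadTerminationFunction plays above.
void OnThreadExit(void* data) {
  delete static_cast<ThreadData*>(data);
}

ThreadData* GetOrCreateThreadData() {
  auto* data = static_cast<ThreadData*>(pthread_getspecific(g_key));
  if (data == nullptr) {
    data = new ThreadData();
    pthread_setspecific(g_key, data);
  }
  return data;
}

int main() {
  // Like PThreadTLSSystem::Setup, create the key once, as early as possible.
  if (pthread_key_create(&g_key, &OnThreadExit) != 0) {
    return 1;
  }

  GetOrCreateThreadData()->counter++;
  std::printf("counter=%d\n", GetOrCreateThreadData()->counter);

  // pthread destructors do not run for the main thread at process exit, so
  // clean up manually before deleting the key.
  OnThreadExit(pthread_getspecific(g_key));
  pthread_setspecific(g_key, nullptr);
  pthread_key_delete(g_key);
  return 0;
}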
482
src/base/allocator/dispatcher/tls.h
Normal file
@@ -0,0 +1,482 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_TLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TLS_H_

#include "build/build_config.h"

#if BUILDFLAG(IS_POSIX)  // the current allocation mechanism (mmap) and TLS
                         // support (pthread) are both defined by POSIX
#define USE_LOCAL_TLS_EMULATION() true
#else
#define USE_LOCAL_TLS_EMULATION() false
#endif

#if USE_LOCAL_TLS_EMULATION()
#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>

#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/strings/string_piece.h"
#include "partition_alloc/partition_alloc_constants.h"

#include <pthread.h>

#if HAS_FEATURE(thread_sanitizer)
#define DISABLE_TSAN_INSTRUMENTATION __attribute__((no_sanitize("thread")))
#else
#define DISABLE_TSAN_INSTRUMENTATION
#endif

#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)

// Verify that a condition holds and cancel the process in case it doesn't. The
// functionality is similar to RAW_CHECK but includes more information in the
// logged messages. It is non-allocating to prevent recursions.
#define TLS_RAW_CHECK(error_message, condition) \
  TLS_RAW_CHECK_IMPL(error_message, condition, __FILE__, __LINE__)

#define TLS_RAW_CHECK_IMPL(error_message, condition, file, line)         \
  do {                                                                   \
    if (!(condition)) {                                                  \
      constexpr const char* message =                                    \
          "TLS System: " error_message " Failed condition '" #condition \
          "' in (" file "@" STR(line) ").\n";                            \
      ::logging::RawCheckFailure(message);                               \
    }                                                                    \
  } while (0)

namespace base::debug {
struct CrashKeyString;
}

namespace base::allocator::dispatcher {
namespace internal {

// Allocate memory using POSIX' mmap and unmap functionality. The allocator
// implements the allocator interface required by ThreadLocalStorage.
struct BASE_EXPORT MMapAllocator {
  // The minimum size of a memory chunk when allocating. Even for chunks with
  // fewer bytes, at least AllocationChunkSize bytes are allocated. For mmap,
  // this is usually the page size of the system.
  // For various OS-CPU combinations, partition_alloc::PartitionPageSize() is
  // not constexpr. Hence, we can not use this value but define it locally.
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR) && \
    PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
  constexpr static size_t AllocationChunkSize =
      partition_alloc::PartitionPageSize();
#elif BUILDFLAG(IS_APPLE)
  constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_64_BITS)
  constexpr static size_t AllocationChunkSize = 16384;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  constexpr static size_t AllocationChunkSize = 16384;
#else
  constexpr static size_t AllocationChunkSize = 4096;
#endif

  // Allocate size_in_bytes bytes of raw memory. Return nullptr if allocation
  // fails.
  void* AllocateMemory(size_t size_in_bytes);
  // Free the raw memory pointed to by pointer_to_allocated. Returns a boolean
  // value indicating if the free was successful.
  bool FreeMemoryForTesting(void* pointer_to_allocated, size_t size_in_bytes);
};

// The allocator used by default for the thread local storage.
using DefaultAllocator = MMapAllocator;

using OnThreadTerminationFunction = void (*)(void*);

// The TLS system used by default for the thread local storage. It stores and
// retrieves thread specific data pointers.
class BASE_EXPORT PThreadTLSSystem {
 public:
  PThreadTLSSystem();

  PThreadTLSSystem(const PThreadTLSSystem&) = delete;
  PThreadTLSSystem(PThreadTLSSystem&&);
  PThreadTLSSystem& operator=(const PThreadTLSSystem&) = delete;
  PThreadTLSSystem& operator=(PThreadTLSSystem&&);

  // Initialize the TLS system to store a data set for different threads.
  // @param thread_termination_function An optional function which will be
  // invoked upon termination of a thread.
  bool Setup(OnThreadTerminationFunction thread_termination_function,
             const base::StringPiece instance_id);
  // Tear down the TLS system. After completing tear down, the thread
  // termination function passed to Setup will not be invoked anymore.
  bool TearDownForTesting();

  // Get the pointer to the data associated to the current thread. Returns
  // nullptr if the TLS system is not initialized or no data was set before.
  void* GetThreadSpecificData();
  // Set the pointer to the data associated to the current thread. Return true
  // if stored successfully, false otherwise.
  bool SetThreadSpecificData(void* data);

 private:
  base::debug::CrashKeyString* crash_key_ = nullptr;
  pthread_key_t data_access_key_ = 0;
#if DCHECK_IS_ON()
  // From POSIX standard at https://www.open-std.org/jtc1/sc22/open/n4217.pdf:
  // The effect of calling pthread_getspecific() or pthread_setspecific() with
  // a key value not obtained from pthread_key_create() or after key has been
  // deleted with pthread_key_delete() is undefined.
  //
  // Unfortunately, POSIX doesn't define a special value of pthread_key_t
  // indicating an invalid key which would allow us to detect accesses outside
  // of initialized state. Hence, to prevent us from drifting into the evil
  // realm of undefined behaviour we store whether we're somewhere between
  // Setup and Teardown.
  std::atomic_bool initialized_{false};
#endif
};

using DefaultTLSSystem = PThreadTLSSystem;

// In some scenarios, most notably when testing, the allocator and TLS system
// passed to |ThreadLocalStorage| are not copyable and have to be wrapped, i.e.
// using std::reference_wrapper. |dereference| is a small helper to retrieve
// the underlying value.
template <typename T>
T& dereference(T& ref) {
  return ref;
}

template <typename T>
T& dereference(std::reference_wrapper<T>& ref) {
  // std::reference_wrapper requires a valid reference for construction,
  // therefore, there is no need to check here.
  return ref.get();
}

// Store thread local data. The data is organized in chunks, where each chunk
// holds |ItemsPerChunk| items. Each item may be free or used.
//
// When a thread requests data, the chunks are searched for a free data item,
// which is registered for this thread and marked as |used|. Further requests
// by this thread will then always return the same item. When a thread
// terminates, the item will be reset and returned to the pool of free items.
//
// Upon construction, the first chunk is created. If a thread requests data and
// there is no free item available, another chunk is created. Upon destruction,
// all memory is freed. Pointers to data items become invalid!
//
// Constructor and destructor are not thread safe.
//
// @tparam PayloadType The item type to be stored.
// @tparam AllocatorType The allocator being used. An allocator must provide
// the following interface:
//  void* AllocateMemory(size_t size_in_bytes); // Allocate size_in_bytes bytes
//  of raw memory.
//  void FreeMemory(void* pointer_to_allocated, size_t size_in_bytes); // Free
//  the raw memory pointed to by pointer_to_allocated.
// Any failure in allocation or free must terminate the process.
// @tparam TLSSystemType The TLS system being used. A TLS system must provide
// the following interface:
//  bool Setup(OnThreadTerminationFunction thread_termination_function);
//  bool Destroy();
//  void* GetThreadSpecificData();
//  bool SetThreadSpecificData(void* data);
// @tparam AllocationChunkSize The minimum size of a memory chunk that the
// allocator can handle. We try to size the chunks so that each chunk uses this
// size to the maximum.
// @tparam IsDestructibleForTesting For testing purposes we allow the
// destructor to perform clean up upon destruction. Otherwise, using the
// destructor will result in a compilation failure.
template <typename PayloadType,
          typename AllocatorType,
          typename TLSSystemType,
          size_t AllocationChunkSize,
          bool IsDestructibleForTesting>
struct ThreadLocalStorage {
  explicit ThreadLocalStorage(const base::StringPiece instance_id)
      : root_(AllocateAndInitializeChunk()) {
    Initialize(instance_id);
  }

  // Create a new instance of |ThreadLocalStorage| using the passed allocator
  // and TLS system. This initializes the underlying TLS system and creates the
  // first chunk of data.
  ThreadLocalStorage(const base::StringPiece instance_id,
                     AllocatorType allocator,
                     TLSSystemType tls_system)
      : allocator_(std::move(allocator)),
        tls_system_(std::move(tls_system)),
        root_(AllocateAndInitializeChunk()) {
    Initialize(instance_id);
  }

  // Deletes an instance of |ThreadLocalStorage| and deletes all the data
  // chunks created.
  ~ThreadLocalStorage() {
    if constexpr (IsDestructibleForTesting) {
      TearDownForTesting();
    } else if constexpr (!IsDestructibleForTesting) {
      static_assert(
          IsDestructibleForTesting,
          "ThreadLocalStorage cannot be destructed outside of test code.");
    }
  }

  // Explicitly prevent all forms of Copy/Move construction/assignment. For an
  // exact copy of ThreadLocalStorage we would need to copy the mapping of
  // thread to item, which we can't do at the moment. On the other side, our
  // atomic members do not support moving out of the box.
  ThreadLocalStorage(const ThreadLocalStorage&) = delete;
  ThreadLocalStorage(ThreadLocalStorage&& other) = delete;
  ThreadLocalStorage& operator=(const ThreadLocalStorage&) = delete;
  ThreadLocalStorage& operator=(ThreadLocalStorage&&) = delete;

  // Get the data item for the current thread. If no data is registered so far,
  // find a free item in the chunks and register it for the current thread.
  PayloadType* GetThreadLocalData() {
    auto& tls_system = dereference(tls_system_);

    auto* slot = static_cast<SingleSlot*>(tls_system.GetThreadSpecificData());

    if (UNLIKELY(slot == nullptr)) {
      slot = FindAndAllocateFreeSlot(root_.load(std::memory_order_relaxed));

      // We might be called in the course of handling a memory allocation. We
      // do not use CHECK since it might allocate and cause a recursion.
      TLS_RAW_CHECK("Failed to set thread specific data.",
                    tls_system.SetThreadSpecificData(slot));

      // Reset the content to wipe out any previous data.
      Reset(slot->item);
    }

    return &(slot->item);
  }

 private:
  // Encapsulate the payload item and some administrative data.
  struct SingleSlot {
    PayloadType item;
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
    std::atomic_flag is_used = ATOMIC_FLAG_INIT;
#else
    std::atomic_flag is_used;
#endif
  };

  template <size_t NumberOfItems>
  struct ChunkT {
    SingleSlot slots[NumberOfItems];
    // Pointer to the next chunk.
    std::atomic<ChunkT*> next_chunk = nullptr;
    // Helper flag to ensure we create the next chunk only once in a multi
    // threaded environment.
    std::once_flag create_next_chunk_flag;
  };

  template <size_t LowerNumberOfItems,
            size_t UpperNumberOfItems,
            size_t NumberOfBytes>
  static constexpr size_t CalculateEffectiveNumberOfItemsBinSearch() {
    if constexpr (LowerNumberOfItems == UpperNumberOfItems) {
      return LowerNumberOfItems;
    }

    constexpr size_t CurrentNumberOfItems =
        (UpperNumberOfItems - LowerNumberOfItems) / 2 + LowerNumberOfItems;

    if constexpr (sizeof(ChunkT<CurrentNumberOfItems>) > NumberOfBytes) {
      return CalculateEffectiveNumberOfItemsBinSearch<
          LowerNumberOfItems, CurrentNumberOfItems, NumberOfBytes>();
    }

    if constexpr (sizeof(ChunkT<CurrentNumberOfItems + 1>) < NumberOfBytes) {
      return CalculateEffectiveNumberOfItemsBinSearch<
          CurrentNumberOfItems + 1, UpperNumberOfItems, NumberOfBytes>();
    }

    return CurrentNumberOfItems;
  }

  // Calculate the maximum number of items we can store in one chunk without
  // the size of the chunk exceeding NumberOfBytes. To avoid things like
  // alignment and packing tampering with the calculation, instead of
  // calculating the correct number of items we use the sizeof-operator against
  // ChunkT to search for the correct size. Unfortunately, the number of
  // recursions is limited by the compiler. Therefore, we use a binary search
  // instead of a simple linear search.
  template <size_t MinimumNumberOfItems, size_t NumberOfBytes>
  static constexpr size_t CalculateEffectiveNumberOfItems() {
    if constexpr (sizeof(ChunkT<MinimumNumberOfItems>) < NumberOfBytes) {
      constexpr size_t LowerNumberOfItems = MinimumNumberOfItems;
      constexpr size_t UpperNumberOfItems =
          NumberOfBytes / sizeof(PayloadType) + 1;
      return CalculateEffectiveNumberOfItemsBinSearch<
          LowerNumberOfItems, UpperNumberOfItems, NumberOfBytes>();
    }

    return MinimumNumberOfItems;
  }

 public:
  // The minimum number of items per chunk. It should be high enough to
  // accommodate most items in the root chunk whilst not wasting too much space
  // on unnecessary items.
  static constexpr size_t MinimumNumberOfItemsPerChunk = 75;
  // The effective number of items per chunk. We use the AllocationChunkSize as
  // a hint to calculate the effective number of items so that we occupy each
  // of these memory chunks to the maximum extent possible.
  static constexpr size_t ItemsPerChunk =
      CalculateEffectiveNumberOfItems<MinimumNumberOfItemsPerChunk,
                                      AllocationChunkSize>();

 private:
  using Chunk = ChunkT<ItemsPerChunk>;

  static_assert(ItemsPerChunk >= MinimumNumberOfItemsPerChunk);

  // Mark an item's slot ready for reuse. This function is used as thread
  // termination function in the TLS system. We do not destroy anything at this
  // point but simply mark the slot as unused.
  static void MarkSlotAsFree(void* data) {
    // We always store SingleSlots in the TLS system. Therefore, we cast to
    // SingleSlot and reset the is_used flag.
    auto* const slot = static_cast<SingleSlot*>(data);

    // We might be called in the course of handling a memory allocation.
    // Therefore, do not use CHECK since it might allocate and cause a
    // recursion.
    TLS_RAW_CHECK("Received an invalid slot.",
                  slot && slot->is_used.test_and_set());

    slot->is_used.clear(std::memory_order_relaxed);
  }

  // Perform common initialization during construction of an instance.
  void Initialize(const base::StringPiece instance_id) {
    // The constructor must be called outside of the allocation path.
    // Therefore, it is safe to verify with CHECK.

    // Passing MarkSlotAsFree as thread_termination_function we ensure the
    // slot/item assigned to the finished thread will be returned to the pool
    // of unused items.
    CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree, instance_id));
  }

  Chunk* AllocateAndInitializeChunk() {
    void* const uninitialized_memory =
        dereference(allocator_).AllocateMemory(sizeof(Chunk));

    // We might be called in the course of handling a memory allocation. We do
    // not use CHECK since it might allocate and cause a recursion.
    TLS_RAW_CHECK("Failed to allocate memory for new chunk.",
                  uninitialized_memory != nullptr);

    return new (uninitialized_memory) Chunk{};
  }

  void FreeAndDeallocateChunkForTesting(Chunk* chunk_to_erase) {
    chunk_to_erase->~Chunk();

    // FreeAndDeallocateChunkForTesting must be called outside of the
    // allocation path. Therefore, it is safe to verify with CHECK.
    CHECK(dereference(allocator_)
              .FreeMemoryForTesting(chunk_to_erase, sizeof(Chunk)));
  }

  // Find a free slot in the passed chunk, reserve it and return it to the
  // caller. If no free slot can be found, head on to the next chunk. If the
  // next chunk doesn't exist, create it.
  SingleSlot* FindAndAllocateFreeSlot(Chunk* const chunk) {
    SingleSlot* const slot = std::find_if_not(
        std::begin(chunk->slots), std::end(chunk->slots),
        [](SingleSlot& candidate_slot) {
          return candidate_slot.is_used.test_and_set(std::memory_order_relaxed);
        });

    // So we found a slot. Happily return it to the caller.
    if (slot != std::end(chunk->slots)) {
      return slot;
    }

    // Ok, there are no more free slots in this chunk. First, ensure the next
    // chunk is valid and create one if necessary.
    std::call_once(chunk->create_next_chunk_flag, [&] {
      // From https://eel.is/c++draft/thread.once.callonce#3
      //
      // Synchronization: For any given once_flag: all active executions occur
      // in a total order; completion of an active execution synchronizes with
      // the start of the next one in this total order; and the returning
      // execution synchronizes with the return from all passive executions.
      //
      // Therefore, we do only a relaxed store here; call_once synchronizes
      // with other threads.
      chunk->next_chunk.store(AllocateAndInitializeChunk(),
                              std::memory_order_relaxed);
    });

    return FindAndAllocateFreeSlot(chunk->next_chunk);
  }

  template <bool IsDestructibleForTestingP = IsDestructibleForTesting>
  typename std::enable_if<IsDestructibleForTestingP>::type
  TearDownForTesting() {
    // The destructor must be called outside of the allocation path. Therefore,
    // it is safe to verify with CHECK.

    // All accessing threads must be terminated by now. For additional safety
    // we tear down the TLS system first. This way we ensure that
    // MarkSlotAsFree is not called anymore and we have no accesses from the
    // TLS system's side.
    CHECK(dereference(tls_system_).TearDownForTesting());

    // Delete all data chunks.
    for (auto* chunk = root_.load(); chunk != nullptr;) {
      auto* next_chunk = chunk->next_chunk.load();
      FreeAndDeallocateChunkForTesting(chunk);
      chunk = next_chunk;
    }
  }

  // Reset a single item to its default value.
  // Since items are re-used, they may be accessed from different threads,
  // causing TSan to trigger. Therefore, the reset is exempt from TSan
  // instrumentation.
  DISABLE_TSAN_INSTRUMENTATION void Reset(PayloadType& item) { item = {}; }

  AllocatorType allocator_;
  TLSSystemType tls_system_;
  std::atomic<Chunk*> const root_;
};

}  // namespace internal

// The ThreadLocalStorage visible to the user. This uses the internal default
// allocator and TLS system.
template <typename StorageType,
          typename AllocatorType = internal::DefaultAllocator,
          typename TLSSystemType = internal::DefaultTLSSystem,
          size_t AllocationChunkSize = AllocatorType::AllocationChunkSize,
          bool IsDestructibleForTesting = false>
using ThreadLocalStorage =
    internal::ThreadLocalStorage<StorageType,
                                 AllocatorType,
                                 TLSSystemType,
                                 AllocationChunkSize,
                                 IsDestructibleForTesting>;

}  // namespace base::allocator::dispatcher

#undef TLS_RAW_CHECK_IMPL
#undef TLS_RAW_CHECK
#undef STR
#undef STR_HELPER

#endif  // USE_LOCAL_TLS_EMULATION()
#endif  // BASE_ALLOCATOR_DISPATCHER_TLS_H_
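To make the template parameters concrete, here is a hedged usage sketch of the ThreadLocalStorage alias declared above. It assumes a POSIX build where USE_LOCAL_TLS_EMULATION() is true and passes IsDestructibleForTesting = true so a function-local instance may be destroyed; the PerThreadStats type and the instance id "example" are illustrative, not part of the Chromium source.

#include "base/allocator/dispatcher/tls.h"

namespace {

struct PerThreadStats {
  size_t allocation_count = 0;
};

void Example() {
  using base::allocator::dispatcher::internal::DefaultAllocator;
  using base::allocator::dispatcher::internal::DefaultTLSSystem;

  // Production code keeps the default IsDestructibleForTesting = false and
  // never destroys the storage; instantiating the destructor then fails to
  // compile, by design.
  base::allocator::dispatcher::ThreadLocalStorage<
      PerThreadStats, DefaultAllocator, DefaultTLSSystem,
      DefaultAllocator::AllocationChunkSize,
      /*IsDestructibleForTesting=*/true>
      storage("example");

  // The first call on a thread claims a free slot; later calls on the same
  // thread return the same item until the thread terminates.
  PerThreadStats* stats = storage.GetThreadLocalData();
  stats->allocation_count++;
}

}  // namespace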
266
src/base/allocator/early_zone_registration_apple.cc
Normal file
@@ -0,0 +1,266 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/early_zone_registration_apple.h"

#include <mach/mach.h>
#include <malloc/malloc.h>

#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/shim/early_zone_registration_constants.h"

// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif

namespace partition_alloc {

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}

#else

extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also in
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}

namespace {

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

}  // namespace

void EarlyMallocZoneRegistration() {
  // Must have static storage duration, as raw pointers are passed to
  // libsystem_malloc.
  static malloc_zone_t g_delegating_zone;
  static malloc_introspection_t g_delegating_zone_introspect;
  static malloc_zone_t* g_default_zone;

  // Make sure that the default zone is instantiated.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  g_default_zone = GetDefaultMallocZone();

  // The delegating zone:
  // - Forwards all allocations to the existing default zone
  // - Does *not* claim to own any memory, meaning that it will always be
  //   skipped in free() in libsystem_malloc.dylib.
  //
  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
  // the main library. Since the main library depends on many external
  // libraries, we cannot install PartitionAlloc as the default zone without
  // concurrency issues.
  //
  // Instead, what we do here is, while the process is single-threaded:
  // - Register the delegating zone as the default one.
  // - Set the original (libsystem_malloc's) one as the second zone
  //
  // Later, when PartitionAlloc initializes, we replace the default
  // (delegating) zone with ours. The end state is:
  // 1. PartitionAlloc zone
  // 2. libsystem_malloc zone

  // Set up of the delegating zone. Note that it doesn't just forward calls to
  // the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct, containing allocator metadata. So if
  // we pass as the first parameter the "simple" delegating zone pointer, then
  // we immediately crash inside the system zone functions. So we need to
  // replace the zone pointer as well.
  //
  // Calls fall into 4 categories:
  // - Allocation calls: forwarded to the real system zone
  // - "Is this pointer yours" calls: always answer no
  // - free(): Should never be called, but is in practice, see comments below.
  // - Diagnostics and debugging: these are typically called for every
  //   zone. They are no-ops for us, as we don't want to double-count, or lock
  //   the data structures of the real zone twice.

  // Allocation: Forward to the real zone.
  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->malloc(g_default_zone, size);
  };
  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
                                size_t size) {
    return g_default_zone->calloc(g_default_zone, num_items, size);
  };
  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->valloc(g_default_zone, size);
  };
  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
    return g_default_zone->realloc(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
                                      void** results, unsigned num_requested) {
    return g_default_zone->batch_malloc(g_default_zone, size, results,
                                        num_requested);
  };
  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
                                  size_t size) {
    return g_default_zone->memalign(g_default_zone, alignment, size);
  };

  // Does ptr belong to this zone? Return value is != 0 if so.
  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return 0;
  };

  // Free functions.
  // The normal path for freeing memory is:
  // 1. Try all zones in order, call zone->size(ptr)
  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
  // 3. If no zone matches, crash.
  //
  // Since this zone always returns 0 in size() (see above), zone->free()
  // should never be called. Unfortunately, this is not the case, as some
  // places in CoreFoundation call malloc_zone_free(zone, ptr) directly. So
  // rather than crashing, forward the call. It's the caller's responsibility
  // to use the same zone for free() as for the allocation (this is in the
  // contract of malloc_zone_free()).
  //
  // However, note that the sequence of calls size() -> free() is not possible
  // for this zone, as size() always returns 0.
  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->free(g_default_zone, ptr);
  };
  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                            size_t size) {
    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
                                    unsigned num_to_be_freed) {
    return g_default_zone->batch_free(g_default_zone, to_be_freed,
                                      num_to_be_freed);
  };
#if PA_TRY_FREE_DEFAULT_IS_AVAILABLE
  g_delegating_zone.try_free_default = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->try_free_default(g_default_zone, ptr);
  };
#endif

  // Diagnostics and debugging.
  //
  // Do nothing to reduce memory footprint, the real zone will do it.
  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
                                         size_t goal) -> size_t { return 0; };

  // Introspection calls are not all optional, for instance locking and
  // unlocking before/after fork() is not optional.
  //
  // Nothing to enumerate.
  g_delegating_zone_introspect.enumerator =
      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
         memory_reader_t reader,
         vm_range_recorder_t recorder) -> kern_return_t {
    return KERN_SUCCESS;
  };
  // Need to provide a real implementation, it is used for e.g. array sizing.
  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
                                              size_t size) {
    return g_default_zone->introspect->good_size(g_default_zone, size);
  };
  // Nothing to do.
  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
    return true;
  };
  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
                                          boolean_t verbose) {};
  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
  // Do not forward the lock / unlock calls. Since the default zone is still
  // there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork().). Rather, do nothing, since this fake
  // zone does not need any locking.
  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
  // No stats.
  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
                                               malloc_statistics_t* stats) {};
  // We are not locked.
  g_delegating_zone_introspect.zone_locked =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  // Don't support discharge checking.
  g_delegating_zone_introspect.enable_discharge_checking =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  g_delegating_zone_introspect.disable_discharge_checking =
      [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
                                              void* memory) {};

  // Could use something lower to support fewer functions, but this is
  // consistent with the real zone installed by PartitionAlloc.
  g_delegating_zone.version = allocator_shim::kZoneVersion;
  g_delegating_zone.introspect = &g_delegating_zone_introspect;
  // This name is used in PartitionAlloc's initialization to determine whether
  // it should replace the delegating zone.
  g_delegating_zone.zone_name = allocator_shim::kDelegatingZoneName;

  // Register puts the new zone at the end, unregister swaps the new zone with
  // the last one.
  // The zone array is, after these lines, in order:
  // 1. |g_default_zone|...|g_delegating_zone|
  // 2. |g_delegating_zone|...| (no more default)
  // 3. |g_delegating_zone|...|g_default_zone|
  malloc_zone_register(&g_delegating_zone);
  malloc_zone_unregister(g_default_zone);
  malloc_zone_register(g_default_zone);

  // Make sure that the purgeable zone is after the default one.
  // This makes g_default_zone take the purgeable zone spot.
  malloc_zone_unregister(purgeable_zone);
  // Add back the purgeable zone as the last one.
  malloc_zone_register(purgeable_zone);

  // Final configuration:
  // |g_delegating_zone|...|g_default_zone|purgeable_zone|

  // Sanity check.
  if (GetDefaultMallocZone() != &g_delegating_zone) {
    abort_report_np("Failed to install the delegating zone as default.");
  }
}

void AllowDoublePartitionAllocZoneRegistration() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result = malloc_get_all_zones(
      mach_task_self(), /*reader=*/nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS) {
    abort_report_np("Cannot enumerate malloc() zones");
  }

  // If PartitionAlloc is one of the zones, *change* its name so that
  // registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct, it does not copy the
  // data.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (zone->zone_name &&
        strcmp(zone->zone_name, allocator_shim::kPartitionAllocZoneName) ==
            0) {
      zone->zone_name = "RenamedPartitionAlloc";
      break;
    }
  }
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}  // namespace partition_alloc
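The register/unregister/register dance above relies on only two behaviors of the zone array: malloc_zone_register appends a zone at the end, and malloc_zone_unregister swaps the removed zone with the last entry. A hedged sketch of just that mechanism, with illustrative names:

#include <malloc/malloc.h>

// Moves |new_zone| to the front of the zone array (making it the default)
// while keeping |old_default| registered behind it. Must run while the
// process is single-threaded, for the reasons described above.
void MakeZoneDefault(malloc_zone_t* new_zone, malloc_zone_t* old_default) {
  malloc_zone_register(new_zone);       // |old_default|...|new_zone|
  malloc_zone_unregister(old_default);  // |new_zone|...| (swapped with last)
  malloc_zone_register(old_default);    // |new_zone|...|old_default|
}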
29
src/base/allocator/early_zone_registration_apple.h
Normal file
@@ -0,0 +1,29 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_

// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.

namespace partition_alloc {

// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();

// Tricks the registration code to believe that PartitionAlloc was not already
// registered. This allows a future library load to register PartitionAlloc's
// zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we still
// end up with a split heap, and more memory usage.
//
// This is a hack for https://crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_APPLE_H_
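A minimal sketch of the intended call site, assuming an Apple build with PartitionAlloc as malloc. The only hard constraint is the one stated above: the call must happen exactly once, before any thread is spawned (in Chrome this is done in the executable's early startup code, per the comment in early_zone_registration_apple.cc).

#include "base/allocator/early_zone_registration_apple.h"

int main(int argc, char** argv) {
  // Register the delegating zone while the process is still single-threaded.
  partition_alloc::EarlyMallocZoneRegistration();
  // Only now is it safe to create threads and load the main library.
  return 0;
}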
93
src/base/allocator/miracle_parameter.cc
Normal file
@@ -0,0 +1,93 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/miracle_parameter.h"

#include "base/command_line.h"
#include "base/strings/strcat.h"
#include "base/system/sys_info.h"

namespace base {

namespace miracle_parameter {

namespace {

std::string GetFieldTrialParamByFeatureAsString(
    const base::Feature& feature,
    const std::string& param_name,
    const std::string& default_value) {
  const std::string value =
      base::GetFieldTrialParamValueByFeature(feature, param_name);
  return value.empty() ? default_value : value;
}

}  // namespace

std::string GetParamNameWithSuffix(const std::string& param_name) {
  // `base::SysInfo::AmountOfPhysicalMemoryMB()` refers to CommandLine
  // internally. If the CommandLine is not initialized, we return early to
  // avoid a crash.
  if (!base::CommandLine::InitializedForCurrentProcess()) {
    return param_name;
  }
  int physical_memory_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  const char* suffix =
      physical_memory_mb < kMiracleParameterMemory512MB  ? "ForLessThan512MB"
      : physical_memory_mb < kMiracleParameterMemory1GB  ? "For512MBTo1GB"
      : physical_memory_mb < kMiracleParameterMemory2GB  ? "For1GBTo2GB"
      : physical_memory_mb < kMiracleParameterMemory4GB  ? "For2GBTo4GB"
      : physical_memory_mb < kMiracleParameterMemory8GB  ? "For4GBTo8GB"
      : physical_memory_mb < kMiracleParameterMemory16GB ? "For8GBTo16GB"
                                                         : "For16GBAndAbove";
  return base::StrCat({param_name, suffix});
}

std::string GetMiracleParameterAsString(const base::Feature& feature,
                                        const std::string& param_name,
                                        const std::string& default_value) {
  return GetFieldTrialParamByFeatureAsString(
      feature, GetParamNameWithSuffix(param_name),
      GetFieldTrialParamByFeatureAsString(feature, param_name, default_value));
}

double GetMiracleParameterAsDouble(const base::Feature& feature,
                                   const std::string& param_name,
                                   double default_value) {
  return base::GetFieldTrialParamByFeatureAsDouble(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsDouble(feature, param_name,
                                                default_value));
}

int GetMiracleParameterAsInt(const base::Feature& feature,
                             const std::string& param_name,
                             int default_value) {
  return base::GetFieldTrialParamByFeatureAsInt(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsInt(feature, param_name,
                                             default_value));
}

bool GetMiracleParameterAsBool(const base::Feature& feature,
                               const std::string& param_name,
                               bool default_value) {
  return base::GetFieldTrialParamByFeatureAsBool(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsBool(feature, param_name,
                                              default_value));
}

base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
                                               const std::string& param_name,
                                               base::TimeDelta default_value) {
  return base::GetFieldTrialParamByFeatureAsTimeDelta(
      feature, GetParamNameWithSuffix(param_name),
      base::GetFieldTrialParamByFeatureAsTimeDelta(feature, param_name,
                                                   default_value));
}

}  // namespace miracle_parameter

}  // namespace base
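A hedged usage sketch of the lookup above: on a device reporting, say, 3 GiB of physical memory, the suffixed parameter "MaxBufferSizeFor2GBTo4GB" is consulted first, then the unsuffixed "MaxBufferSize", then the hard-coded default. The feature and parameter names here are illustrative, not existing Chromium parameters.

#include "base/allocator/miracle_parameter.h"
#include "base/feature_list.h"

BASE_FEATURE(kMyBufferFeature,
             "MyBufferFeature",
             base::FEATURE_ENABLED_BY_DEFAULT);

int GetMaxBufferSize() {
  // Falls back: suffixed param -> unsuffixed param -> default (1024).
  return base::miracle_parameter::GetMiracleParameterAsInt(
      kMyBufferFeature, "MaxBufferSize", /*default_value=*/1024);
}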
177
src/base/allocator/miracle_parameter.h
Normal file
@@ -0,0 +1,177 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
#define BASE_ALLOCATOR_MIRACLE_PARAMETER_H_

#include "base/base_export.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

// This is a mirror copy of the //components/miracle_parameter/ to resolve the
// dependency cycle of (base->miracle_parameter->base).
// Eventually the miracle_parameter component will have a public interface in
// //base/ and this could be removed.
// TODO(crbug.com/1475915): remove miracle_parameter from //base/allocator/.

namespace base {

namespace miracle_parameter {

namespace {

template <typename Enum>
Enum GetFieldTrialParamByFeatureAsEnum(
    const base::Feature& feature,
    const std::string& param_name,
    const Enum default_value,
    const base::span<const typename base::FeatureParam<Enum>::Option>&
        options) {
  std::string string_value =
      base::GetFieldTrialParamValueByFeature(feature, param_name);
  if (string_value.empty()) {
    return default_value;
  }

  for (const auto& option : options) {
    if (string_value == option.name) {
      return option.value;
    }
  }

  base::LogInvalidEnumValue(feature, param_name, string_value,
                            static_cast<int>(default_value));
  return default_value;
}

}  // namespace

constexpr int kMiracleParameterMemory512MB = 512;
constexpr int kMiracleParameterMemory1GB = 1024;
constexpr int kMiracleParameterMemory2GB = 2 * 1024;
constexpr int kMiracleParameterMemory4GB = 4 * 1024;
constexpr int kMiracleParameterMemory8GB = 8 * 1024;
constexpr int kMiracleParameterMemory16GB = 16 * 1024;

// GetParamNameWithSuffix puts a parameter name suffix based on the amount of
// physical memory.
//
// - "ForLessThan512MB" for less than 512MB memory devices.
// - "For512MBTo1GB" for 512MB to 1GB memory devices.
// - "For1GBTo2GB" for 1GB to 2GB memory devices.
// - "For2GBTo4GB" for 2GB to 4GB memory devices.
// - "For4GBTo8GB" for 4GB to 8GB memory devices.
// - "For8GBTo16GB" for 8GB to 16GB memory devices.
// - "For16GBAndAbove" for 16GB memory and above devices.
BASE_EXPORT
std::string GetParamNameWithSuffix(const std::string& param_name);

// Provides a similar behavior to FeatureParam<std::string> except the return
// value is determined by the amount of physical memory.
BASE_EXPORT
std::string GetMiracleParameterAsString(const base::Feature& feature,
                                        const std::string& param_name,
                                        const std::string& default_value);

// Provides a similar behavior to FeatureParam<double> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
double GetMiracleParameterAsDouble(const base::Feature& feature,
                                   const std::string& param_name,
                                   double default_value);

// Provides a similar behavior to FeatureParam<int> except the return value is
// determined by the amount of physical memory.
BASE_EXPORT
int GetMiracleParameterAsInt(const base::Feature& feature,
                             const std::string& param_name,
                             int default_value);

// Provides a similar behavior to FeatureParam<bool> except the return value
// is determined by the amount of physical memory.
BASE_EXPORT
bool GetMiracleParameterAsBool(const base::Feature& feature,
                               const std::string& param_name,
                               bool default_value);

// Provides a similar behavior to FeatureParam<base::TimeDelta> except the
// return value is determined by the amount of physical memory.
BASE_EXPORT
base::TimeDelta GetMiracleParameterAsTimeDelta(const base::Feature& feature,
                                               const std::string& param_name,
                                               base::TimeDelta default_value);

// Provides a similar behavior to FeatureParam<Enum> except the return value
// is determined by the amount of physical memory.
template <typename Enum>
Enum GetMiracleParameterAsEnum(
    const base::Feature& feature,
    const std::string& param_name,
    const Enum default_value,
    const base::span<const typename base::FeatureParam<Enum>::Option>
        options) {
  return GetFieldTrialParamByFeatureAsEnum(
      feature, GetParamNameWithSuffix(param_name),
      GetFieldTrialParamByFeatureAsEnum(feature, param_name, default_value,
                                        options),
      options);
}

#define MIRACLE_PARAMETER_FOR_STRING(function_name, feature, param_name,    \
                                     default_value)                         \
  std::string function_name() {                                             \
    static const std::string value =                                        \
        miracle_parameter::GetMiracleParameterAsString(feature, param_name, \
                                                       default_value);      \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_DOUBLE(function_name, feature, param_name,    \
                                     default_value)                         \
  double function_name() {                                                  \
    static const double value =                                             \
        miracle_parameter::GetMiracleParameterAsDouble(feature, param_name, \
                                                       default_value);      \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_INT(function_name, feature, param_name,     \
                                  default_value)                          \
  int function_name() {                                                   \
    static const int value = miracle_parameter::GetMiracleParameterAsInt( \
        feature, param_name, default_value);                              \
    return value;                                                         \
  }

#define MIRACLE_PARAMETER_FOR_BOOL(function_name, feature, param_name,      \
                                   default_value)                           \
  bool function_name() {                                                    \
    static const bool value = miracle_parameter::GetMiracleParameterAsBool( \
        feature, param_name, default_value);                                \
    return value;                                                           \
  }

#define MIRACLE_PARAMETER_FOR_TIME_DELTA(function_name, feature, param_name, \
                                         default_value)                      \
  base::TimeDelta function_name() {                                          \
    static const base::TimeDelta value =                                     \
        miracle_parameter::GetMiracleParameterAsTimeDelta(feature,           \
                                                          param_name,        \
                                                          default_value);    \
    return value;                                                            \
  }

#define MIRACLE_PARAMETER_FOR_ENUM(function_name, feature, param_name,      \
                                   default_value, type, options)            \
  type function_name() {                                                    \
    static const type value = miracle_parameter::GetMiracleParameterAsEnum( \
        feature, param_name, default_value, base::make_span(options));      \
    return value;                                                           \
  }

}  // namespace miracle_parameter

}  // namespace base

#endif  // BASE_ALLOCATOR_MIRACLE_PARAMETER_H_
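The MIRACLE_PARAMETER_FOR_* macros above wrap the same lookup in a function with a function-local static, so the memory-tiered value is computed once and cached. A hedged sketch with illustrative feature and parameter names (partition_alloc_features.cc below uses the same macro for its thread cache size):

#include "base/allocator/miracle_parameter.h"
#include "base/feature_list.h"

BASE_FEATURE(kMyCacheFeature,
             "MyCacheFeature",
             base::FEATURE_ENABLED_BY_DEFAULT);

// Expands to `int GetMyCacheSizeKb()`, returning the value of the
// "MyCacheSizeKb" parameter (with the memory-size suffix applied),
// defaulting to 512.
MIRACLE_PARAMETER_FOR_INT(GetMyCacheSizeKb,
                          kMyCacheFeature,
                          "MyCacheSizeKb",
                          512)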
508
src/base/allocator/partition_alloc_features.cc
Normal file
@@ -0,0 +1,508 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_alloc_features.h"

#include "base/allocator/miracle_parameter.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/features.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/shim/allocator_shim_dispatch_to_noop_on_free.h"
#include "partition_alloc/thread_cache.h"

namespace base {
namespace features {

BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
             "PartitionAllocUnretainedDanglingPtr",
             FEATURE_ENABLED_BY_DEFAULT);

constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
    kUnretainedDanglingPtrModeOption[] = {
        {UnretainedDanglingPtrMode::kCrash, "crash"},
        {UnretainedDanglingPtrMode::kDumpWithoutCrashing,
         "dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
    kUnretainedDanglingPtrModeParam = {
        &kPartitionAllocUnretainedDanglingPtr,
        "mode",
        UnretainedDanglingPtrMode::kCrash,
        &kUnretainedDanglingPtrModeOption,
};

BASE_FEATURE(kPartitionAllocDanglingPtr,
             "PartitionAllocDanglingPtr",
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG) ||                        \
    (BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) &&                             \
     (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)) && !defined(OFFICIAL_BUILD) && \
     (!defined(NDEBUG) || DCHECK_IS_ON()))
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
    {DanglingPtrMode::kCrash, "crash"},
    {DanglingPtrMode::kLogOnly, "log_only"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
    &kPartitionAllocDanglingPtr,
    "mode",
    DanglingPtrMode::kCrash,
    &kDanglingPtrModeOption,
};
constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
    {DanglingPtrType::kAll, "all"},
    {DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
    &kPartitionAllocDanglingPtr,
    "type",
    DanglingPtrType::kAll,
    &kDanglingPtrTypeOption,
};
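// Read-side sketch (illustrative comment, not part of the imported file):
// callers typically gate on the feature and then consult the cached params,
// e.g.
//
//   if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
//     const DanglingPtrMode mode = features::kDanglingPtrModeParam.Get();
//     const DanglingPtrType type = features::kDanglingPtrTypeParam.Get();
//     // ... install the matching dangling-raw_ptr handling ...
//   }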

#if BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
             "PartitionAllocPCScan",
             FEATURE_DISABLED_BY_DEFAULT);
#endif  // BUILDFLAG(USE_STARSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
             "PartitionAllocPCScanBrowserOnly",
             FEATURE_DISABLED_BY_DEFAULT);

// If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
             "PartitionAllocPCScanRendererOnly",
             FEATURE_DISABLED_BY_DEFAULT);

// Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
             "PartitionAllocLargeThreadCacheSize",
             FEATURE_ENABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_INT(
    GetPartitionAllocLargeThreadCacheSizeValue,
    kPartitionAllocLargeThreadCacheSize,
    "PartitionAllocLargeThreadCacheSizeValue",
    ::partition_alloc::ThreadCacheLimits::kLargeSizeThreshold)

MIRACLE_PARAMETER_FOR_INT(
    GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid,
    kPartitionAllocLargeThreadCacheSize,
    "PartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid",
    ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold)

BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
             "PartitionAllocLargeEmptySlotSpanRing",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_FEATURE(kPartitionAllocSchedulerLoopQuarantine,
             "PartitionAllocSchedulerLoopQuarantine",
             FEATURE_DISABLED_BY_DEFAULT);
// Scheduler Loop Quarantine's capacity in bytes.
const base::FeatureParam<int> kPartitionAllocSchedulerLoopQuarantineCapacity{
    &kPartitionAllocSchedulerLoopQuarantine,
    "PartitionAllocSchedulerLoopQuarantineCapacity", 0};

BASE_FEATURE(kPartitionAllocZappingByFreeFlags,
             "PartitionAllocZappingByFreeFlags",
             FEATURE_DISABLED_BY_DEFAULT);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

BASE_FEATURE(kPartitionAllocBackupRefPtr,
             "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
    BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) ||     \
    (BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CASTOS)) ||                  \
    BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
    kBackupRefPtrEnabledProcessesOptions[] = {
        {BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
        {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
         "browser-and-renderer"},
        {BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
        {BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};

const base::FeatureParam<BackupRefPtrEnabledProcesses>
    kBackupRefPtrEnabledProcessesParam{
        &kPartitionAllocBackupRefPtr, "enabled-processes",
        BackupRefPtrEnabledProcesses::kNonRenderer,
        &kBackupRefPtrEnabledProcessesOptions};

// Map *-with-memory-reclaimer modes onto their counterparts without the
// suffix. They are the same, as the memory reclaimer is now controlled
// independently.
//
// Similarly, map disabled-but-*-way-split onto plain disabled, as we are done
// experimenting with partition split.
//
// We need to keep those option strings, as there is a long tail of clients
// that may have an old field trial config which used these modes.
//
// DO NOT USE *-with-memory-reclaimer and disabled-but-*-way-split modes in
// new configs!
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
    {BackupRefPtrMode::kDisabled, "disabled"},
    {BackupRefPtrMode::kEnabled, "enabled"},
    {BackupRefPtrMode::kEnabled, "enabled-with-memory-reclaimer"},
    {BackupRefPtrMode::kEnabledInSameSlotMode, "enabled-in-same-slot-mode"},
    {BackupRefPtrMode::kDisabled, "disabled-but-2-way-split"},
    {BackupRefPtrMode::kDisabled,
     "disabled-but-2-way-split-with-memory-reclaimer"},
    {BackupRefPtrMode::kDisabled, "disabled-but-3-way-split"},
};

const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
    &kPartitionAllocBackupRefPtr, "brp-mode",
    BackupRefPtrMode::kEnabledInSameSlotMode, &kBackupRefPtrModeOptions};

BASE_FEATURE(kPartitionAllocMemoryTagging,
             "PartitionAllocMemoryTagging",
#if BUILDFLAG(USE_FULL_MTE)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif
);

constexpr FeatureParam<MemtagMode>::Option kMemtagModeOptions[] = {
    {MemtagMode::kSync, "sync"},
    {MemtagMode::kAsync, "async"}};

const base::FeatureParam<MemtagMode> kMemtagModeParam{
    &kPartitionAllocMemoryTagging, "memtag-mode",
#if BUILDFLAG(USE_FULL_MTE)
    MemtagMode::kSync,
#else
    MemtagMode::kAsync,
#endif
    &kMemtagModeOptions};

constexpr FeatureParam<MemoryTaggingEnabledProcesses>::Option
    kMemoryTaggingEnabledProcessesOptions[] = {
        {MemoryTaggingEnabledProcesses::kBrowserOnly, "browser-only"},
        {MemoryTaggingEnabledProcesses::kNonRenderer, "non-renderer"},
        {MemoryTaggingEnabledProcesses::kAllProcesses, "all-processes"}};

const base::FeatureParam<MemoryTaggingEnabledProcesses>
    kMemoryTaggingEnabledProcessesParam{
        &kPartitionAllocMemoryTagging, "enabled-processes",
#if BUILDFLAG(USE_FULL_MTE)
        MemoryTaggingEnabledProcesses::kAllProcesses,
#else
        MemoryTaggingEnabledProcesses::kBrowserOnly,
#endif
        &kMemoryTaggingEnabledProcessesOptions};

BASE_FEATURE(kKillPartitionAllocMemoryTagging,
             "KillPartitionAllocMemoryTagging",
             FEATURE_DISABLED_BY_DEFAULT);

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
BASE_FEATURE(kPartitionAllocPermissiveMte,
             "PartitionAllocPermissiveMte",
#if BUILDFLAG(USE_FULL_MTE)
             // We want to actually crash if USE_FULL_MTE is enabled.
             FEATURE_DISABLED_BY_DEFAULT
#else
             FEATURE_ENABLED_BY_DEFAULT
#endif
);

const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
    false};  // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};

// If enabled, switches the bucket distribution to a denser one.
//
// We enable this by default everywhere except for 32-bit Android, since we saw
// regressions there.
BASE_FEATURE(kPartitionAllocUseDenserDistribution,
             "PartitionAllocUseDenserDistribution",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
             FEATURE_DISABLED_BY_DEFAULT
#else
             FEATURE_ENABLED_BY_DEFAULT
#endif  // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
);
const base::FeatureParam<BucketDistributionMode>::Option
    kPartitionAllocBucketDistributionOption[] = {
        {BucketDistributionMode::kDefault, "default"},
        {BucketDistributionMode::kDenser, "denser"},
};
const base::FeatureParam<BucketDistributionMode>
    kPartitionAllocBucketDistributionParam{
        &kPartitionAllocUseDenserDistribution, "mode",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
        BucketDistributionMode::kDefault,
#else
        BucketDistributionMode::kDenser,
#endif  // BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
        &kPartitionAllocBucketDistributionOption};

BASE_FEATURE(kPartitionAllocMemoryReclaimer,
             "PartitionAllocMemoryReclaimer",
             FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<TimeDelta> kPartitionAllocMemoryReclaimerInterval = {
    &kPartitionAllocMemoryReclaimer, "interval",
    TimeDelta(),  // Defaults to zero.
};

// Configures whether we set a lower limit for renderers that do not have a
// main frame, similar to the limit already applied to backgrounded renderers.
BASE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers,
             "LowerPAMemoryLimitForNonMainRenderers",
             FEATURE_DISABLED_BY_DEFAULT);

// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does
// not affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
             "PartitionAllocPCScanMUAwareScheduler",
             FEATURE_ENABLED_BY_DEFAULT);

// If enabled, PCScan unconditionally frees all quarantined objects.
// This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
             "PartitionAllocPCScanImmediateFreeing",
             FEATURE_DISABLED_BY_DEFAULT);

// If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
             "PartitionAllocPCScanEagerClearing",
             FEATURE_DISABLED_BY_DEFAULT);

// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
             "PartitionAllocPCScanStackScanning",
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
             FEATURE_ENABLED_BY_DEFAULT
#else
             FEATURE_DISABLED_BY_DEFAULT
#endif  // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
);

BASE_FEATURE(kPartitionAllocDCScan,
             "PartitionAllocDCScan",
             FEATURE_DISABLED_BY_DEFAULT);

// Whether to straighten free lists for larger slot spans in PurgeMemory() ->
// ... -> PartitionPurgeSlotSpan().
BASE_FEATURE(kPartitionAllocStraightenLargerSlotSpanFreeLists,
             "PartitionAllocStraightenLargerSlotSpanFreeLists",
             FEATURE_ENABLED_BY_DEFAULT);
const base::FeatureParam<
    partition_alloc::StraightenLargerSlotSpanFreeListsMode>::Option
    kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption[] = {
        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::
             kOnlyWhenUnprovisioning,
         "only-when-unprovisioning"},
        {partition_alloc::StraightenLargerSlotSpanFreeListsMode::kAlways,
         "always"},
};
const base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
    kPartitionAllocStraightenLargerSlotSpanFreeListsMode = {
        &kPartitionAllocStraightenLargerSlotSpanFreeLists,
        "mode",
        partition_alloc::StraightenLargerSlotSpanFreeListsMode::
            kOnlyWhenUnprovisioning,
        &kPartitionAllocStraightenLargerSlotSpanFreeListsModeOption,
};

// Whether to sort free lists for smaller slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists,
             "PartitionAllocSortSmallerSlotSpanFreeLists",
             FEATURE_ENABLED_BY_DEFAULT);

// Whether to sort the active slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
             "PartitionAllocSortActiveSlotSpans",
             FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(IS_WIN)
// Whether to retry allocations when commit fails.
BASE_FEATURE(kPageAllocatorRetryOnCommitFailure,
             "PageAllocatorRetryOnCommitFailure",
             FEATURE_DISABLED_BY_DEFAULT);
#endif

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// A parameter to exclude or not exclude PartitionAllocSupport from
// PartialLowEndModeOnMidRangeDevices. This is used to see how it affects
// renderer performance, e.g. the blink_perf.parser benchmark.
// The feature kPartialLowEndModeOnMidRangeDevices is defined in
// //base/features.cc. Since the following feature param is related to
// PartitionAlloc, define the param here.
const FeatureParam<bool> kPartialLowEndModeExcludePartitionAllocSupport{
    &kPartialLowEndModeOnMidRangeDevices, "exclude-partition-alloc-support",
    false};
#endif

BASE_FEATURE(kEnableConfigurableThreadCacheMultiplier,
             "EnableConfigurableThreadCacheMultiplier",
             base::FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplier,
                             kEnableConfigurableThreadCacheMultiplier,
                             "ThreadCacheMultiplier",
                             2.)

MIRACLE_PARAMETER_FOR_DOUBLE(GetThreadCacheMultiplierForAndroid,
                             kEnableConfigurableThreadCacheMultiplier,
                             "ThreadCacheMultiplierForAndroid",
                             1.)

constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
    base::TimeDelta time_delta) {
  return partition_alloc::internal::base::Microseconds(
      time_delta.InMicroseconds());
}

constexpr base::TimeDelta FromPartitionAllocTimeDelta(
    partition_alloc::internal::base::TimeDelta time_delta) {
  return base::Microseconds(time_delta.InMicroseconds());
}

BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
             "EnableConfigurableThreadCachePurgeInterval",
             base::FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheMinPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheMinPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kMinPurgeInterval))

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheMaxPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheMaxPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kMaxPurgeInterval))

MIRACLE_PARAMETER_FOR_TIME_DELTA(
    GetThreadCacheDefaultPurgeIntervalValue,
    kEnableConfigurableThreadCachePurgeInterval,
    "ThreadCacheDefaultPurgeInterval",
    FromPartitionAllocTimeDelta(partition_alloc::kDefaultPurgeInterval))

const partition_alloc::internal::base::TimeDelta
GetThreadCacheMinPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheMinPurgeIntervalValue());
}

const partition_alloc::internal::base::TimeDelta
GetThreadCacheMaxPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheMaxPurgeIntervalValue());
}

const partition_alloc::internal::base::TimeDelta
GetThreadCacheDefaultPurgeInterval() {
  return ToPartitionAllocTimeDelta(GetThreadCacheDefaultPurgeIntervalValue());
}

BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
             "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
             base::FEATURE_DISABLED_BY_DEFAULT);

MIRACLE_PARAMETER_FOR_INT(
    GetThreadCacheMinCachedMemoryForPurgingBytes,
    kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
    "ThreadCacheMinCachedMemoryForPurgingBytes",
    partition_alloc::kMinCachedMemoryForPurgingBytes)

// An apparent quarantine leak in the buffer partition unacceptably
// bloats memory when MiraclePtr is enabled in the renderer process.
// We believe we have found and patched the leak, but out of an
// abundance of caution, we provide this toggle that allows us to
// wholly disable MiraclePtr in the buffer partition, if necessary.
//
// TODO(crbug.com/1444624): this is unneeded once
// MiraclePtr-for-Renderer launches.
BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
             "PartitionAllocDisableBRPInBufferPartition",
             FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
BASE_FEATURE(kUsePoolOffsetFreelists,
             "PartitionAllocUsePoolOffsetFreelists",
             base::FEATURE_DISABLED_BY_DEFAULT);
#endif

BASE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown,
             "PartitionAllocMakeFreeNoOpOnShutdown",
             FEATURE_DISABLED_BY_DEFAULT);

constexpr FeatureParam<WhenFreeBecomesNoOp>::Option
    kPartitionAllocMakeFreeNoOpOnShutdownOptions[] = {
        {
            WhenFreeBecomesNoOp::kBeforeShutDownThreads,
            "before-shutdown-threads",
        },
        {
            WhenFreeBecomesNoOp::kInShutDownThreads,
            "in-shutdown-threads",
        },
        {
            WhenFreeBecomesNoOp::kAfterShutDownThreads,
            "after-shutdown-threads",
        },
};

const base::FeatureParam<WhenFreeBecomesNoOp>
    kPartitionAllocMakeFreeNoOpOnShutdownParam{
        &kPartitionAllocMakeFreeNoOpOnShutdown, "callsite",
        WhenFreeBecomesNoOp::kBeforeShutDownThreads,
        &kPartitionAllocMakeFreeNoOpOnShutdownOptions};

void MakeFreeNoOp(WhenFreeBecomesNoOp callsite) {
  CHECK(base::FeatureList::GetInstance());
  // Ignoring `free()` during Shutdown would allow developers to introduce new
  // dangling pointers, so we avoid ignoring `free()` when the
  // DanglingPointerDetector is enabled.
  // Note: For now, the DanglingPointerDetector is only enabled on 5 bots, and
  // on the Linux non-official configuration.
  // TODO(b/40802063): Reconsider this decision after the experiment.
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  if (base::FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
    return;
  }
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  if (base::FeatureList::IsEnabled(kPartitionAllocMakeFreeNoOpOnShutdown) &&
      kPartitionAllocMakeFreeNoOpOnShutdownParam.Get() == callsite) {
    allocator_shim::InsertNoOpOnFreeAllocatorShimOnShutDown();
  }
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
}
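// Usage sketch (illustrative comment, not part of the imported file): each
// shutdown phase passes its own callsite, and the shim is only installed for
// the phase selected by the "callsite" param, e.g.
//
//   base::features::MakeFreeNoOp(
//       base::features::WhenFreeBecomesNoOp::kBeforeShutDownThreads);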

BASE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground,
             "PartitionAllocAdjustSizeWhenInForeground",
             base::FEATURE_DISABLED_BY_DEFAULT);

}  // namespace features
}  // namespace base
228
src/base/allocator/partition_alloc_features.h
Normal file
@ -0,0 +1,228 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h"

namespace base {
namespace features {

extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
enum class UnretainedDanglingPtrMode {
  kCrash,
  kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
    kUnretainedDanglingPtrModeParam;

// See /docs/dangling_ptr.md
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode {
  // Crash immediately after detecting a dangling raw_ptr.
  kCrash,  // (default)

  // Log the signature of every occurrence without crashing. It is used by
  // bots.
  // Format "[DanglingSignature]\t<1>\t<2>\t<3>\t<4>"
  // 1. The function which freed the memory while it was still referenced.
  // 2. The task in which the memory was freed.
  // 3. The function which released the raw_ptr reference.
  // 4. The task in which the raw_ptr was released.
  kLogOnly,

  // Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
    kDanglingPtrModeParam;
enum class DanglingPtrType {
  // Act on any dangling raw_ptr released after being freed.
  kAll,  // (default)

  // Detect when freeing memory and releasing the dangling raw_ptr happens in
  // a different task. Those are more likely to cause use after free.
  kCrossTask,

  // Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
    kDanglingPtrTypeParam;

#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValue();
BASE_EXPORT int GetPartitionAllocLargeThreadCacheSizeValueForLowRAMAndroid();

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSchedulerLoopQuarantine);
// Scheduler Loop Quarantine's capacity in bytes.
extern const BASE_EXPORT base::FeatureParam<int>
    kPartitionAllocSchedulerLoopQuarantineCapacity;

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocZappingByFreeFlags);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

enum class BackupRefPtrEnabledProcesses {
  // BRP enabled only in the browser process.
  kBrowserOnly,
  // BRP enabled only in the browser and renderer processes.
  kBrowserAndRenderer,
  // BRP enabled in all processes, except renderer.
  kNonRenderer,
  // BRP enabled in all processes.
  kAllProcesses,
};

enum class BackupRefPtrMode {
  // BRP is disabled across all partitions. Equivalent to the Finch flag being
  // disabled.
  kDisabled,

  // BRP is enabled in the main partition, as well as certain Renderer-only
  // partitions (if enabled in Renderer at all).
  kEnabled,

  // As above, but "same slot" mode is used, as opposed to "previous slot".
  // This means that the ref-count is placed at the end of the same slot as
  // the object it protects, as opposed to the end of the previous slot.
  kEnabledInSameSlotMode,
};

enum class MemtagMode {
  // memtagMode will be SYNC.
  kSync,
  // memtagMode will be ASYNC.
  kAsync,
};

enum class MemoryTaggingEnabledProcesses {
  // Memory tagging enabled only in the browser process.
  kBrowserOnly,
  // Memory tagging enabled in all processes, except renderer.
  kNonRenderer,
  // Memory tagging enabled in all processes.
  kAllProcesses,
};

enum class BucketDistributionMode : uint8_t {
  kDefault,
  kDenser,
};

// Parameter for the 'kPartitionAllocMakeFreeNoOpOnShutdown' feature, which
// controls when free() becomes a no-op during Shutdown().
enum class WhenFreeBecomesNoOp {
  // The no-op free() allocator shim is inserted either before, in, or after
  // the shutdown of threads, per the values below.
  kBeforeShutDownThreads,
  kInShutDownThreads,
  kAfterShutDownThreads,
};

// Inserts a no-op on 'free()' allocator shim at the front of the
// dispatch chain if called from the appropriate callsite.
BASE_EXPORT void MakeFreeNoOp(WhenFreeBecomesNoOp callsite);

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMakeFreeNoOpOnShutdown);
extern const BASE_EXPORT base::FeatureParam<WhenFreeBecomesNoOp>
    kPartitionAllocMakeFreeNoOpOnShutdownParam;

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
    kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
    kBackupRefPtrModeParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryTagging);
extern const BASE_EXPORT base::FeatureParam<MemtagMode> kMemtagModeParam;
extern const BASE_EXPORT base::FeatureParam<MemoryTaggingEnabledProcesses>
    kMemoryTaggingEnabledProcessesParam;
// Kill switch for memory tagging. Skips any code related to memory tagging
// when enabled.
BASE_EXPORT BASE_DECLARE_FEATURE(kKillPartitionAllocMemoryTagging);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPermissiveMte);
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableInstantiationCheckParam;
extern const BASE_EXPORT base::FeatureParam<BucketDistributionMode>
    kPartitionAllocBucketDistributionParam;

BASE_EXPORT BASE_DECLARE_FEATURE(kLowerPAMemoryLimitForNonMainRenderers);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseDenserDistribution);

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocMemoryReclaimer);
extern const BASE_EXPORT base::FeatureParam<TimeDelta>
    kPartitionAllocMemoryReclaimerInterval;
BASE_EXPORT BASE_DECLARE_FEATURE(
    kPartitionAllocStraightenLargerSlotSpanFreeLists);
extern const BASE_EXPORT
    base::FeatureParam<partition_alloc::StraightenLargerSlotSpanFreeListsMode>
        kPartitionAllocStraightenLargerSlotSpanFreeListsMode;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortSmallerSlotSpanFreeLists);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);

#if BUILDFLAG(IS_WIN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool>
    kPartialLowEndModeExcludePartitionAllocSupport;
#endif

BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCacheMultiplier);
BASE_EXPORT double GetThreadCacheMultiplier();
BASE_EXPORT double GetThreadCacheMultiplierForAndroid();

BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCachePurgeInterval);
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheMinPurgeInterval();
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheMaxPurgeInterval();
extern const partition_alloc::internal::base::TimeDelta
GetThreadCacheDefaultPurgeInterval();

BASE_EXPORT BASE_DECLARE_FEATURE(
    kEnableConfigurableThreadCacheMinCachedMemoryForPurging);
BASE_EXPORT int GetThreadCacheMinCachedMemoryForPurgingBytes();

BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);

// This feature is additionally gated behind a buildflag because
// pool offset freelists cannot be represented when PartitionAlloc uses
// 32-bit pointers.
#if BUILDFLAG(USE_FREELIST_POOL_OFFSETS)
BASE_EXPORT BASE_DECLARE_FEATURE(kUsePoolOffsetFreelists);
#endif

// When set, partitions use a larger ring buffer and free memory less
// aggressively when in the foreground.
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocAdjustSizeWhenInForeground);

}  // namespace features
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
1480
src/base/allocator/partition_alloc_support.cc
Normal file
File diff suppressed because it is too large
123
src/base/allocator/partition_alloc_support.h
Normal file
@ -0,0 +1,123 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_

#include <map>
#include <string>

#include "base/base_export.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h"
#include "base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/thread_cache.h"

namespace base::allocator {

#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif

// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();

BASE_EXPORT void StartMemoryReclaimer(
    scoped_refptr<SequencedTaskRunner> task_runner);

BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();

// Install handlers for when dangling raw_ptr(s) have been detected. This
// prints two StackTraces: one where the memory is freed, one where the last
// dangling raw_ptr stopped referencing it.
//
// This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();

// Allows re-configuring PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport {
 public:
  struct BrpConfiguration {
    bool enable_brp = false;
    bool in_slot_metadata_in_same_slot = false;
    bool process_affected_by_brp_flag = false;
  };

  // Reconfigure* functions re-configure PartitionAlloc. It is impossible to
  // configure PartitionAlloc before/at its initialization using information
  // not known at compile-time (e.g. process type, Finch), because by the time
  // this information is available, memory allocations will surely have
  // happened, requiring a functioning allocator.
  //
  // *Earlyish() is called as early as reasonably possible.
  // *AfterZygoteFork() is its complement, finishing the configuration of
  // process-specific state that had to be postponed due to *Earlyish() being
  // called with |process_type==kZygoteProcess|.
  // *AfterFeatureListInit() is called in addition to the above, once the
  // FeatureList has been initialized and is ready to use. It is guaranteed to
  // be called on non-zygote processes or after the zygote has been forked.
  // *AfterTaskRunnerInit() is called once it is possible to post tasks, and
  // after the previous steps.
  //
  // *Earlyish() must be called exactly once. *AfterZygoteFork() must be
  // called once iff *Earlyish() was called before with
  // |process_type==kZygoteProcess|.
  //
  // *AfterFeatureListInit() may be called more than once, but will perform
  // its re-configuration steps exactly once.
  //
  // *AfterTaskRunnerInit() may be called more than once.
  void ReconfigureForTests();
  void ReconfigureEarlyish(const std::string& process_type);
  void ReconfigureAfterZygoteFork(const std::string& process_type);
  void ReconfigureAfterFeatureListInit(
      const std::string& process_type,
      bool configure_dangling_pointer_detector = true);
  void ReconfigureAfterTaskRunnerInit(const std::string& process_type);
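  // Illustrative call order for a non-zygote process (hypothetical driver
  // code, not part of the imported file; the real callsites live in
  // Chromium's startup machinery):
  //
  //   auto* support = PartitionAllocSupport::Get();
  //   support->ReconfigureEarlyish(process_type);
  //   // ... FeatureList becomes available ...
  //   support->ReconfigureAfterFeatureListInit(process_type);
  //   // ... task runners become available ...
  //   support->ReconfigureAfterTaskRunnerInit(process_type);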

  // |has_main_frame| tells us if the renderer contains a main frame.
  void OnForegrounded(bool has_main_frame);
  void OnBackgrounded();

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  static std::string ExtractDanglingPtrSignatureForTests(
      std::string stacktrace);
#endif

  static PartitionAllocSupport* Get();

  static BrpConfiguration GetBrpConfiguration(const std::string& process_type);

  // Returns true if memory tagging should be enabled, if available, for the
  // given process type. May be called multiple times per process.
  static bool ShouldEnableMemoryTagging(const std::string& process_type);

  // For calling from within third_party/blink/.
  static bool ShouldEnableMemoryTaggingInRendererProcess();

 private:
  PartitionAllocSupport();

  base::Lock lock_;
  bool called_for_tests_ GUARDED_BY(lock_) = false;
  bool called_earlyish_ GUARDED_BY(lock_) = false;
  bool called_after_zygote_fork_ GUARDED_BY(lock_) = false;
  bool called_after_feature_list_init_ GUARDED_BY(lock_) = false;
  bool called_after_thread_pool_init_ GUARDED_BY(lock_) = false;
  std::string established_process_type_ GUARDED_BY(lock_) = "INVALID";

#if PA_CONFIG(THREAD_CACHE_SUPPORTED) && \
    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  size_t largest_cached_size_ =
      ::partition_alloc::ThreadCacheLimits::kDefaultSizeThreshold;
#endif
};

}  // namespace base::allocator

#endif  // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
18
src/base/allocator/partition_allocator/BUILD.gn
Normal file
@ -0,0 +1,18 @@
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("partition_alloc.gni")

group("raw_ptr") {
  public_deps = [ "src/partition_alloc:raw_ptr" ]
}

group("buildflags") {
  public_deps = [ "src/partition_alloc:buildflags" ]
}

if (is_clang_or_gcc) {
  group("partition_alloc") {
    public_deps = [ "src/partition_alloc:partition_alloc" ]
  }
}
50
src/base/allocator/partition_allocator/DEPS
Normal file
@ -0,0 +1,50 @@
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# The PartitionAlloc library must not depend on the Chromium
# project in order to remain a standalone library.
noparent = True

include_rules = [
  # `partition_alloc` can depend on itself, via the `include_dirs` it declares.
  "+partition_alloc",

  # Build flags to infer the architecture and operating system in use.
  "+build/build_config.h",
  "+build/buildflag.h",
]

specific_include_rules = {
  ".*_(perf|unit)test\.cc$": [
    "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
    "+base/allocator/dispatcher/dispatcher.h",
    "+base/debug/allocation_trace.h",
    "+base/debug/debugging_buildflags.h",
    "+base/debug/proc_maps_linux.h",
    "+base/system/sys_info.h",
    "+base/test/gtest_util.h",
    "+base/timer/lap_timer.h",
    "+base/win/windows_version.h",
    "+testing/gmock/include/gmock/gmock.h",
    "+testing/gtest/include/gtest/gtest.h",
    "+testing/perf/perf_result_reporter.h",
  ],
  "extended_api\.cc$": [
    "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
  ],
  "raw_(ptr|ref)_unittest\.cc$": [
    "+base",
    "+third_party/abseil-cpp/absl/types/optional.h",
    "+third_party/abseil-cpp/absl/types/variant.h",
  ],
  "raw_ptr_test_support\.h$": [
    "+testing/gmock/include/gmock/gmock.h",
    "+third_party/abseil-cpp/absl/types/optional.h",
  ]
}

# In the context of a module-level DEPS, the `deps` variable must be defined.
# Some tools rely on it, for instance dawn/tools/fetch_dawn_dependencies.py.
# It has no use in other contexts.
deps = {}
8
src/base/allocator/partition_allocator/DIR_METADATA
Normal file
@ -0,0 +1,8 @@
monorail: {
  component: "Blink>MemoryAllocator>Partition"
}
# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"
buganizer_public: {
  component_id: 1456202
}
8
src/base/allocator/partition_allocator/OWNERS
Normal file
@ -0,0 +1,8 @@
bartekn@chromium.org
haraken@chromium.org
keishi@chromium.org
lizeb@chromium.org
tasak@google.com

per-file pointers/raw_ptr*=file://base/memory/MIRACLE_PTR_OWNERS
per-file pointers/raw_ref*=file://base/memory/MIRACLE_PTR_OWNERS
203
src/base/allocator/partition_allocator/PartitionAlloc.md
Normal file
@ -0,0 +1,203 @@
# PartitionAlloc Design

This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.

## Quick Links

* [Glossary](./glossary.md): Definitions of terms commonly used in
  PartitionAlloc. The present document largely avoids defining terms.

* [Build Config](./build_config.md): Pertinent GN args, buildflags, and
  macros.

* [Chrome-External Builds](./external_builds.md): Further considerations
  for standalone PartitionAlloc, plus an embedder's guide for some extra
  GN args.

## Overview

PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.

### Performance

PartitionAlloc is designed to be extremely fast in its fast paths. The fast
paths of allocation and deallocation require very few (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.

![The central allocator manages slots and spans. It is locked on a
per-partition basis. Separately, the thread cache consumes slots
from the central allocator, allowing it to hand out memory
quickly to individual threads.](./src/partition_alloc/dot/layers.png)

However, even the fast path isn't the fastest, because it requires taking
a per-partition lock. Although we optimized the lock, there was still room for
improvement; to this end, we introduced the thread cache.
The thread cache has been tailored to satisfy a vast majority of requests by
allocating from and releasing memory to the main allocator in batches,
amortizing lock acquisition and further improving locality while not trapping
excess memory.
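
As a mental model only (this is not the real thread cache, which lives in
`partition_alloc/thread_cache.h`), the batching idea can be sketched like
this; `kBatchSize`, `CentralFreeList`, and `ThreadCache` are hypothetical
names invented for illustration:

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <mutex>
#include <vector>

constexpr size_t kBatchSize = 8;

// Central free list, shared by all threads and guarded by a lock.
struct CentralFreeList {
  std::mutex lock;
  std::vector<void*> slots;

  // Moves up to kBatchSize slots into |out| under the lock; returns the count.
  size_t AllocBatch(std::array<void*, kBatchSize>& out) {
    std::lock_guard<std::mutex> guard(lock);
    size_t n = std::min(kBatchSize, slots.size());
    for (size_t i = 0; i < n; ++i) {
      out[i] = slots.back();
      slots.pop_back();
    }
    return n;
  }

  // Returns a batch of slots to the central list under the lock.
  void FreeBatch(const std::array<void*, kBatchSize>& in, size_t n) {
    std::lock_guard<std::mutex> guard(lock);
    slots.insert(slots.end(), in.begin(), in.begin() + n);
  }
};

// Per-thread cache: the lock is only taken once per kBatchSize operations.
struct ThreadCache {
  std::array<void*, kBatchSize> cached = {};
  size_t count = 0;

  void* Alloc(CentralFreeList& central) {
    if (count == 0) {
      count = central.AllocBatch(cached);  // Slow path: refill in a batch.
      if (count == 0) return nullptr;      // Central list exhausted.
    }
    return cached[--count];                // Fast path: no lock.
  }

  void Free(CentralFreeList& central, void* slot) {
    if (count == kBatchSize) {
      central.FreeBatch(cached, count);    // Slow path: flush in a batch.
      count = 0;
    }
    cached[count++] = slot;                // Fast path: no lock.
  }
};
```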

### Security

Security is an important goal of PartitionAlloc.

PartitionAlloc guarantees that different partitions exist in different regions
of the process's address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.

Similarly, one page can contain only objects from the same bucket.
When freed, PartitionAlloc returns the physical memory, but continues to
reserve the region for this very bucket.

The above techniques help avoid type confusion attacks. Note, however, that
these apply only to normal buckets and not to direct map, as it'd waste too
much address space.

PartitionAlloc also guarantees that:

* Linear overflows/underflows cannot corrupt into, out of, or between
  partitions. There are guard pages at the beginning and the end of each
  memory region owned by a partition.

* Linear overflows/underflows cannot corrupt the allocation metadata.
  PartitionAlloc records metadata in a dedicated, out-of-line region (not
  adjacent to objects), surrounded by guard pages. (Freelist pointers are an
  exception.)

* A partial pointer overwrite of a freelist pointer should fault.

* Direct map allocations have guard pages at the beginning and the end.

### Alignment

PartitionAlloc guarantees that returned pointers are aligned on a
`partition_alloc::internal::kAlignment` boundary (typically 16B on
64-bit systems, and 8B on 32-bit).

PartitionAlloc also supports higher levels of alignment, which can be
requested via `PartitionAlloc::AlignedAlloc()` or platform-specific APIs (such
as `posix_memalign()`). The requested alignment has to be a power of two.
PartitionAlloc reserves the right to round up the requested size to the
nearest power of two, greater than or equal to the requested alignment. This
may be wasteful, but allows taking advantage of natural PartitionAlloc
alignment guarantees. Allocations with an alignment requirement greater than
`partition_alloc::internal::kAlignment` are expected to be very rare.
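
For example, on POSIX platforms under PA-E (where PartitionAlloc backs
`malloc`), a standard call like the following would be served by
PartitionAlloc; the concrete sizes are just an illustration:

```cpp
#include <cstdlib>

int main() {
  void* p = nullptr;
  // Alignment must be a power of two (and, per POSIX, a multiple of
  // sizeof(void*)). Per the rounding policy described above, the backing
  // slot may be rounded up to a power of two >= 64 bytes.
  if (posix_memalign(&p, /*alignment=*/64, /*size=*/100) == 0) {
    free(p);
  }
  return 0;
}
```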

## Architecture

### Layout in Memory

PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
pages. Each super page is split into partition pages.
The first and the last partition pages are permanently inaccessible and serve
as guard pages, with the exception of one system page in the middle of the
first partition page that holds metadata (a 32B struct per partition page).

![A super page is shown full of slot spans. The slot spans are logically
strung together to form buckets. At both extremes of the super page
are guard pages. PartitionAlloc metadata is hidden inside the
guard pages at the "front."](./src/partition_alloc/dot/super-page.png)

* The slot span numbers provide a visual hint of their size (in partition
  pages).
* Colors provide a visual hint of the bucket to which the slot span belongs.
    * Although only five colors are shown, in reality, a super page holds
      tens of slot spans, some of which belong to the same bucket.
* The system page that holds metadata tracks each partition page with one 32B
  [`PartitionPage` struct][PartitionPage], which is either
    * a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
    * a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
      diagram).
* Gray fill denotes guard pages (one partition page each at the head and tail
  of each super page).
* In some configurations, PartitionAlloc stores more metadata than can
  fit in the one system page at the front. These are the bitmaps for
  StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
  what would otherwise be usable space for slot spans. One, both, or
  none of these bitmaps may be present, depending on build
  configuration, runtime configuration, and type of allocation.
  See [`SuperPagePayloadBegin()`][payload-start] for details.

As allocation requests arrive, there is eventually a need to allocate a new
slot span.
Address space for such a slot span is carved out from the last super page. If
there is not enough space, a new super page is allocated. Due to the varying
sizes of slot spans, this may leave space unused (we never go back to fill
previous super pages), which is fine because this memory is merely reserved,
which is far less precious than committed memory. Note also that address
space reserved for a slot span is never released, even if the slot span isn't
used for a long time.

All slots in a newly allocated slot span are *free*, i.e. available for
allocation.

### Freelist Pointers

All free slots within a slot span are chained into a singly-linked free-list,
by writing the *next* pointer at the beginning of each slot, and the head of
the list is written in the metadata struct.

However, writing a pointer in each free slot of a newly allocated span would
require committing and faulting in physical pages upfront, which would be
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
Only provisioned slots are chained into the freelist.
Once provisioned slots in a span are depleted, another page worth of slots is
provisioned (note, a slot that crosses a page boundary only gets provisioned
with slots of the next page). See
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.

Freelist pointers are stored at the beginning of each free slot. As such, they
are the only metadata that is inline, i.e. stored among the
objects. This makes them prone to overruns. On little-endian systems, the
pointers are encoded by reversing byte order, so that partial overruns will
very likely result in destroying the pointer, as opposed to forming a valid
pointer to a nearby location.

Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a
different manner. This helps PartitionAlloc detect corruptions.
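
A minimal sketch of the byte-order-reversal encoding described above
(illustrative only; the real logic, including the shadow encoding, lives in
PartitionAlloc's freelist entry code):

```cpp
#include <cstdint>

// Full 64-bit byte swap, built from progressively wider swaps. (Assumes a
// 64-bit little-endian platform, matching the discussion above.)
uint64_t ByteSwap(uint64_t x) {
  x = (x & 0x00FF00FF00FF00FFull) << 8 | (x & 0xFF00FF00FF00FF00ull) >> 8;
  x = (x & 0x0000FFFF0000FFFFull) << 16 | (x & 0xFFFF0000FFFF0000ull) >> 16;
  return x << 32 | x >> 32;
}

// Encoded, a partial overwrite of the low bytes lands in the *high* bytes of
// the decoded pointer, which almost certainly faults instead of aliasing a
// valid nearby object.
uint64_t EncodeFreelistPointer(void* next) {
  return ByteSwap(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(next)));
}

void* DecodeFreelistPointer(uint64_t encoded) {
  return reinterpret_cast<void*>(static_cast<uintptr_t>(ByteSwap(encoded)));
}
```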

### Slot Span States

A slot span can be in any of 4 states:
* *Full*. A full span has no free slots.
* *Empty*. An empty span has no allocated slots, only free slots.
* *Active*. An active span is anything in between the above two.
* *Decommitted*. A decommitted span is a special case of an empty span, where
  all pages are decommitted from memory.
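
The four states map onto a simple classification; a hypothetical predicate
(names invented for illustration, the real state lives in `SlotSpanMetadata`):

```cpp
#include <cstddef>

enum class SlotSpanState { kFull, kActive, kEmpty, kDecommitted };

// |allocated| counts live slots, |capacity| counts all slots in the span.
SlotSpanState Classify(size_t allocated, size_t capacity, bool committed) {
  if (!committed) return SlotSpanState::kDecommitted;  // Empty and unmapped.
  if (allocated == 0) return SlotSpanState::kEmpty;
  if (allocated == capacity) return SlotSpanState::kFull;
  return SlotSpanState::kActive;  // Anything in between.
}
```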

PartitionAlloc prioritizes getting an available slot from an active span over
an empty one, in the hope that the latter can soon be transitioned into a
decommitted state, thus releasing memory. There is no mechanism, however, to
prioritize selection of a slot span based on the number of already allocated
slots.

An empty span becomes decommitted either when there are too many empty spans
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in
low memory pressure conditions). An allocation can be satisfied from
a decommitted span if there are no active or empty spans available. The slot
provisioning mechanism kicks back in, committing the pages gradually as needed,
and the span becomes active. (There is currently no other way
to unprovision slots than decommitting the entire span.)

As mentioned above, a bucket is a collection of slot spans containing slots of
the same size. In fact, each bucket has 3 linked lists, chaining active, empty
and decommitted spans (see `PartitionBucket::*_slot_spans_head`).
There is no need for a full span list. The lists are updated lazily. An empty,
decommitted or full span may stay on the active list for some time, until
`PartitionBucket::SetNewActiveSlotSpan()` encounters it.
A decommitted span may stay on the empty list for some time,
until `PartitionBucket::SlowPathAlloc()` encounters it. However,
the inaccuracy can't happen in the other direction, i.e. an active span can
only be on the active list, and an empty span can only be on the active or
empty list.

[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/src/partition_alloc/partition_page.h;l=454
103
src/base/allocator/partition_allocator/build_config.md
Normal file
@ -0,0 +1,103 @@
# Build Config

PartitionAlloc's behavior and operation can be influenced by many
different settings. Broadly, these are controlled at the top-level by
[GN args][gn-declare-args], which propagate via
[buildflags][buildflag-header] and `#defined` clauses.

*** promo
Most of what you'll want to know exists between

* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
* Everything else ending in `.gn` or `.gni` in
  `//base/allocator/partition_allocator/src/partition_alloc/`,
* [`allocator.gni`][allocator-gni],
* [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
* [`//base/BUILD.gn`][base-build-gn].
***

*** aside
While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of
PartitionAlloc's behavior is governed by compound conditions `#defined`
in [`partition_alloc_config.h`][partition-alloc-config].
***
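
Concretely, the two styles look like this in source (both macros appear in
the imported code; the bodies here are placeholders):

```cpp
#include "build/buildflag.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Chromium-style buildflag: set from a GN arg at build-generation time.
#endif

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// PartitionAlloc-style config: a compound condition #defined in
// partition_alloc_config.h.
#endif
```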
|
||||||
|
|
||||||
|
*** promo
|
||||||
|
PartitionAlloc targets C++17. As the team develops standalone
|
||||||
|
PartitionAlloc, this may diverge from what the rest of Chrome browser
|
||||||
|
does, as we will be obligated to support external clients that
|
||||||
|
may not yet support newer C++ standards.
|
||||||
|
|
||||||
|
See [Chrome-External Builds](./external_builds.md) for more.
|
||||||
|
***
|
||||||
|
|
||||||
|
## Select GN Args
|
||||||
|
|
||||||
|
### `use_partition_alloc`
|
||||||
|
|
||||||
|
Defines whether PartitionAlloc is at all available.
|
||||||
|
|
||||||
|
Setting this `false` will entirely remove PartitionAlloc from the
|
||||||
|
Chromium build. _You probably do not want this._
|
||||||
|
|
||||||
|
*** note
|
||||||
|
Back when PartitionAlloc was the dedicated allocator in Blink, disabling
|
||||||
|
it was logically identical to wholly disabling it in Chromium. This GN
|
||||||
|
arg organically grew in scope with the advent of
|
||||||
|
PartitionAlloc-Everywhere and must be `true` as a prerequisite for
|
||||||
|
enabling PA-E.
|
||||||
|
***
|
||||||
|
|
||||||
|
### `use_partition_alloc_as_malloc`
|
||||||
|
|
||||||
|
Does nothing special when value is `false`. Enables
|
||||||
|
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is `true`.
|
||||||
|
|
||||||
|
*** note
|
||||||
|
* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
|
||||||
|
includes Blink where PartitionAlloc originated, setting
|
||||||
|
`use_partition_alloc_as_malloc = false` does not disable PA usage in Blink,
|
||||||
|
which invokes PA explicitly (not via malloc).
|
||||||
|
* `use_partition_alloc_as_malloc = true` must not be confused
|
||||||
|
with `use_partition_alloc` (see above).
|
||||||
|
***
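
For instance, a minimal `args.gn` sketch that turns on PA-E (illustrative
only; it assumes a platform where the allocator shim is supported):

```gn
# args.gn (illustrative)
use_partition_alloc = true            # Prerequisite: PA is available at all.
use_allocator_shim = true             # Route malloc() & friends via the shim.
use_partition_alloc_as_malloc = true  # The shim dispatches to PA (PA-E).
```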

## Notable Macros

There is an ongoing effort
[to break out PartitionAlloc into a standalone library][pa-ee-crbug].
Once PartitionAlloc stands alone from the larger Chrome build apparatus,
the code loses access to some macros. This is not an immediate concern,
but the team needs to decide either

* how to propagate these macros in place, or
* how to remove them, replacing them with PA-specific build config.

A non-exhaustive list of work items:

* `OFFICIAL_BUILD` - influences crash macros and
  `PA_THREAD_CACHE_ALLOC_STATS`. These are conceptually distinct enough
  to be worth separating into dedicated build controls.
* `IS_PARTITION_ALLOC_IMPL` - must be defined when PartitionAlloc is
  built as a shared library. This is required to export symbols (see the
  sketch after this list).
* `COMPONENT_BUILD` - component builds (as per
  `//docs/component_build.md`) must `#define COMPONENT_BUILD`.
  Additionally, to build for Win32, the invoker must `#define WIN32`.
* `MEMORY_TOOL_REPLACES_ALLOCATOR`
* `*_SANITIZER` - mainly influences unit tests.
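
For illustration, the shared-library case above already surfaces in GN
today; this sketch mirrors the corresponding `defines` line in
PartitionAlloc's own `BUILD.gn` (treat it as an excerpt, not a
prescription):

```gn
config("partition_alloc_implementation") {
  # Must be defined when PartitionAlloc is built as a shared library,
  # so that its symbols are exported.
  defines = [ "IS_PARTITION_ALLOC_IMPL" ]
}
```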

*** note
Over time, the above list should evolve into a list of macros / GN args
that influence PartitionAlloc's behavior.
***

[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview
[pa-ee-crbug]: https://crbug.com/1151236
@ -0,0 +1,9 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file will be used to check out PartitionAlloc and to build it as a
# standalone library. In that case, PartitionAlloc needs to define
# build_with_chromium. If building PartitionAlloc as part of Chromium,
# Chromium will provide build_with_chromium=true.
build_with_chromium = false
@ -0,0 +1,22 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//build_overrides/build.gni")

# This is the default build configuration when building PartitionAlloc
# as a standalone library.
# If embedders want to use PartitionAlloc, they need to create their own
# //build_overrides/partition_alloc.gni and define their own PartitionAlloc
# configuration.
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
assert_cpp20_default = true

# This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false
76
src/base/allocator/partition_allocator/external_builds.md
Normal file
@ -0,0 +1,76 @@
# Chrome-External Builds

Work is ongoing to make PartitionAlloc a standalone library. The
standalone repository for PartitionAlloc is hosted
[here][standalone-PA-repo].

## GN Args

External clients should examine the args described in
`build_overrides/partition_alloc.gni` and add them in their own source
tree. PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`.

In addition, something must provide `build_with_chromium = false` to
the PA build system.
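
A minimal sketch of such an embedder-side
`//build_overrides/partition_alloc.gni`, using the default-value args
shipped with PartitionAlloc (the values shown are illustrative, not
recommendations):

```gn
# //build_overrides/partition_alloc.gni, in the embedder's tree.
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
```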

## `use_partition_alloc`

The `use_partition_alloc` GN arg, described in
[`build_config.md`](./build_config.md), provides a GN-level seam that
embedders

1. can set in their GN args and
2. should observe in their GN recipes to conditionally pull in
   PartitionAlloc.

I.e., if you have any reason to disable PartitionAlloc, you should do so
with this GN arg. Avoid pulling in PartitionAlloc headers when the
corresponding buildflag is false.
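
A sketch of what "observing" the arg in an embedder's GN recipe might look
like (the target name, source file, and dep label here are illustrative):

```gn
source_set("embedder_thing") {
  sources = [ "thing.cc" ]
  if (use_partition_alloc) {
    # Only pull PartitionAlloc in (and only include its headers)
    # when it is enabled.
    deps = [ "//base/allocator/partition_allocator:partition_alloc" ]
  }
}
```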

Setting `use_partition_alloc` false will also implicitly disable other
features, e.g. nixing the compilation of BackupRefPtr as the
implementation of `raw_ptr<T>`.

## Periodic Memory Reduction Routines

PartitionAlloc provides APIs to

* reclaim memory (see [memory\_reclaimer.h](./memory_reclaimer.h)) and

* purge thread caches (see [thread\_cache.h](./thread_cache.h)).

Both of these must be called by the embedder external to PartitionAlloc.
PA provides neither an event loop nor timers of its own, delegating this
to its clients.

## Build Considerations

External clients create constraints on PartitionAlloc's implementation.

### C++17

PartitionAlloc targets C++17. This is aligned with our first external
client, PDFium, and may be further constrained by other clients. These
impositions prevent us from moving in lockstep with Chrome's target
C++ version.

We do not even have guarantees of backported future features, e.g.
C++20's designated initializers. Therefore, these cannot ship with
PartitionAlloc.

### MSVC Support

PDFium supports MSVC. PartitionAlloc will have to match it.

### MSVC Constraint: No Inline Assembly

MSVC's syntax for `asm` blocks differs from the one widely adopted in
parts of Chrome. But more generally,
[MSVC doesn't support inline assembly on ARM and x64 processors][msvc-inline-assembly].
Assembly blocks should be gated behind compiler-specific flags and
replaced with intrinsics in the presence of `COMPILER_MSVC` (absent
`__clang__`).

[standalone-PA-repo]: https://chromium.googlesource.com/chromium/src/base/allocator/partition_allocator.git
[msvc-inline-assembly]: https://docs.microsoft.com/en-us/cpp/assembler/inline/inline-assembler?view=msvc-170
245
src/base/allocator/partition_allocator/glossary.md
Normal file
@ -0,0 +1,245 @@
# Glossary

This page describes some core terminology used in PartitionAlloc.
A weak attempt is made to present terms "in conceptual order" s.t.
each term depends mainly upon previously defined ones.

## Top-Level Terms

### Partition

A heap that is separated and protected both from other
partitions and from non-PartitionAlloc memory. Each partition holds
multiple buckets.

*** promo
**NOTE**: In code (and comments), "partition," "root," and even
"allocator" are all conceptually the same thing.
***

## Pages

### System Page

A memory page defined by the CPU/OS. Commonly
referred to as a "virtual page" in other contexts. This is typically
4KiB, but it can be larger. PartitionAlloc supports up to 64KiB,
though this constant isn't always known at compile time (depending
on the OS).

### Partition Page

The most common granularity used by
PartitionAlloc. Consists of exactly 4 system pages.

### Super Page

A 2MiB region, aligned on a 2MiB boundary. Not to
be confused with OS-level terms like "large page" or "huge page",
which are also commonly 2MiB. These have to be fully committed /
uncommitted in memory, whereas super pages can be partially committed
with system page granularity.

### Extent

An extent is a run of consecutive super pages (belonging
to a single partition). Extents are to super pages what slot spans are
to slots (see below).

## Slots and Spans

### Slot

An indivisible allocation unit. Slot sizes are tied to
buckets. For example, each allocation that falls into the bucket
(224, 256] would be satisfied with a slot of size 256. This
applies only to normal buckets, not to direct map.

### Slot Span

A run of same-sized slots that are contiguous in
memory. Slot span size is a multiple of partition page size, but it
isn't always a multiple of slot size, although we try hard for this
to be the case.

### Small Bucket

Allocations up to 4 partition pages. In these
cases, slot spans are always between 1 and 4 partition pages in
size. For each slot size, the slot span size is chosen to minimize the
number of pages used while keeping the rounding waste under a
reasonable limit.

* For example, for a slot size of 96, 64B waste is deemed acceptable
  when using a single partition page, but for slot size
  384, the potential waste of 256B wouldn't be, so 3 partition pages
  are used to achieve 0B waste.
* PartitionAlloc may avoid waste by lowering the number of committed
  system pages compared to the number of reserved pages. For
  example, for the slot size of 896B we'd use a slot span of 2
  partition pages of 16KiB, i.e. 8 system pages of 4KiB, but commit
  only up to 7, thus resulting in perfect packing (see the arithmetic
  below).
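
The arithmetic behind these examples, assuming 4KiB system pages and
therefore 16KiB partition pages (a verification sketch added here, not part
of the original text):

$$
16384 = 170 \times 96 + 64, \qquad
3 \times 16384 = 49152 = 128 \times 384, \qquad
7 \times 4096 = 28672 = 32 \times 896
$$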

### Single-Slot Span

Allocations above 4 partition pages (but
≤`kMaxBucketed`). Each such slot span is guaranteed to
hold exactly one slot, hence the name.

*** promo
Fun fact: there are sizes ≤4 partition pages that result in a
slot span having exactly 1 slot, but nonetheless they're still
classified as small buckets. The reason is that single-slot spans
are often handled by a different code path, and that distinction
is made purely based on slot size, for simplicity and efficiency.
***

## Buckets

### Bucket

A collection of regions in a partition that contains
similar-sized objects. For example, one bucket may hold objects of
size (224, 256], another (256, 320], etc. Bucket size
brackets are geometrically spaced,
[going up to `kMaxBucketed`][max-bucket-comment].

*** promo
Plainly put, all slots (ergo the resulting spans) of a given size
class are logically chained into one bucket.
***

![A bucket, spanning multiple super pages, collects spans whose
slots are of a particular size class.](./src/partition_alloc/dot/bucket.png)

### Normal Bucket

Any bucket whose size ceiling does not exceed
`kMaxBucketed`. This is the common case in PartitionAlloc, and
the "normal" modifier is often dropped in casual reference.

### Direct Map (Bucket)

Any allocation whose size exceeds `kMaxBucketed`.

## Other Terms

### Object

A chunk of memory returned to the allocating invoker
of the size requested. It doesn't have to span the entire slot,
nor does it have to begin at the slot start. This term is commonly
used as a parameter name in PartitionAlloc code, as opposed to
`slot_start`.

### Thread Cache

A [thread-local structure][pa-thread-cache] that
holds some not-too-large memory chunks, ready to be allocated. This
speeds up in-thread allocation by reducing a lock hold to a
thread-local storage lookup, improving cache locality.

### Pool

A large (and contiguous on 64-bit) virtual address region, housing
super pages, etc. from which PartitionAlloc services allocations. The
primary purpose of the pools is to provide a fast answer to the
question, "Did PartitionAlloc allocate the memory for this pointer
from this pool?" with a single bit-masking operation.

* The regular pool is a general purpose pool that contains allocations that
  aren't protected by BackupRefPtr.
* The BRP pool contains all allocations protected by BackupRefPtr.
* [64-bit only] The configurable pool is named generically, because its
  primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime,
  providing a pre-existing mapping. Its allocations aren't protected by
  BackupRefPtr.
* [64-bit only] The thread isolated pool returns memory protected with
  per-thread permissions. At the moment, this is implemented using pkeys
  on x64. Its primary user is [V8 CFI][v8-cfi].

![The singular AddressPoolManager mediates access to the separate pools
for each PartitionRoot.](./src/partition_alloc/dot/address-space.png)

*** promo
Pools are downgraded into a logical concept in 32-bit environments,
tracking a non-contiguous set of allocations using a bitmap.
***

### Payload

The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
other metadata (e.g. StarScan bitmaps) can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.

### Allocation Fast Path

A path taken during an allocation that is
considered fast. Usually means that an allocation request can be
immediately satisfied by grabbing a slot from the freelist of the
first active slot span in the bucket.

### Allocation Slow Path

Anything which is not fast (see above).

Can involve

* finding another active slot span in the list,
* provisioning more slots in a slot span,
* bringing back a free (or decommitted) slot span,
* allocating a new slot span, or even
* allocating a new super page.

*** aside
By "slow" we may mean something as simple as extra logic (`if`
statements etc.), or something as costly as system calls.
***

## Legacy Terms

These terms are (mostly) deprecated and should not be used. They are
surfaced here to provide a ready reference for readers coming from
older design documents or documentation.

### GigaCage

A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which nearly all allocations
are taken. _Pools_ have overtaken GigaCage in conceptual importance,
and so there is less need today to refer to "GigaCage" or the
"cage." This is especially true given the V8 Sandbox and the
configurable pool (see above).

## PartitionAlloc-Everywhere

Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
It was invoked explicitly, by calling PartitionAlloc APIs directly.

PartitionAlloc-Everywhere is the name of the project that brought PartitionAlloc
to the entire-ish codebase (exclusions apply). This was done by intercepting
`malloc()`, `free()`, `realloc()`, `posix_memalign()`, etc. and
routing them into PartitionAlloc. The shim located in
`base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h` is
responsible for intercepting. For more details, see
[base/allocator/README.md](../../../base/allocator/README.md).

A special, catch-it-all *Malloc* partition has been created for the intercepted
`malloc()` et al. This is to isolate it from already existing Blink partitions.
The only exception to that is Blink's *FastMalloc* partition, which was also
catch-it-all in nature, so it's perfectly fine to merge these together, to
minimize fragmentation.

As of 2022, PartitionAlloc-Everywhere is supported on

* Windows 32- and 64-bit
* Linux
* Android 32- and 64-bit
* macOS
* Fuchsia

[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/thread_cache.h
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#
[v8-cfi]: https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/preview#
394
src/base/allocator/partition_allocator/partition_alloc.gni
Normal file
@ -0,0 +1,394 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//build/config/cronet/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni")

# PartitionAlloc has limited support for MSVC's cl.exe compiler. It can only
# access the generated "buildflags" and the "raw_ptr" definitions implemented
# with RawPtrNoOpImpl. Everything else is considered not supported.
#
# Since there are no other good ways to detect MSVC's cl.exe, we are reusing the
# same definition used by Chrome in //base/BUILD.gn. See
# https://crbug.com/988071.
is_clang_or_gcc = is_clang || !is_win

# Whether 64-bit pointers are used.
# A static_assert in partition_alloc_config.h verifies that.
if (is_nacl) {
  # NaCl targets don't use 64-bit pointers.
  has_64_bit_pointers = false
} else if (current_cpu == "x64" || current_cpu == "arm64" ||
           current_cpu == "loong64" || current_cpu == "riscv64") {
  has_64_bit_pointers = true
} else if (current_cpu == "x86" || current_cpu == "arm") {
  has_64_bit_pointers = false
} else {
  assert(false, "Unknown CPU: $current_cpu")
}

# Increases the size of the empty slot span ring.
use_large_empty_slot_span_ring = is_mac

has_memory_tagging =
    current_cpu == "arm64" && is_clang && !is_asan && (is_linux || is_android)

declare_args() {
  # Causes all the allocations to be routed via allocator_shim.cc. Usually,
  # the allocator shim will, in turn, route them to PartitionAlloc, but
  # other allocators are also supported by the allocator shim.
  use_allocator_shim = use_allocator_shim_default && is_clang_or_gcc

  # Whether PartitionAlloc should be available for use or not.
  # true makes PartitionAlloc linked to the executable or shared library and
  # makes it available for use. It doesn't mean that the default allocator
  # is PartitionAlloc, which is governed by |use_partition_alloc_as_malloc|.
  #
  # N.B. generally, embedders should look at this GN arg and at the
  # corresponding buildflag to determine whether to interact with PA
  # source at all (pulling the component in via GN, including headers,
  # etc.). There is nothing stopping a lazy embedder from ignoring this
  # and unconditionally using PA, but such a setup is inadvisable.
  #
  # In Chromium, this is set true, except:
  #
  # 1. On Cronet bots, because Cronet doesn't use PartitionAlloc at all,
  #    and doesn't wish to incur the library size increase (crbug.com/674570).
  # 2. On NaCl (through this declaration), where PartitionAlloc doesn't
  #    build at all.
  use_partition_alloc = !is_nacl && is_clang_or_gcc
}

if (!is_clang_or_gcc) {
  assert(!use_partition_alloc,
         "PartitionAlloc's allocator does not support this compiler")
  assert(!use_allocator_shim,
         "PartitionAlloc's allocator shim does not support this compiler")
}

if (is_nacl) {
  assert(!use_partition_alloc, "PartitionAlloc doesn't build on NaCl")
}

declare_args() {
  # Turns on compiler optimizations in PartitionAlloc in Debug builds.
  # If PartitionAlloc-Everywhere is enabled in a Debug build (e.g. for
  # tests), all memory allocations and deallocations are executed by
  # non-optimized PartitionAlloc, so chrome (including tests) will be much
  # slower. This would cause debug trybots' timeouts. If you want to debug
  # PartitionAlloc itself, use partition_alloc_optimized_debug=false;
  # otherwise, keep partition_alloc_optimized_debug=true to get optimized
  # PartitionAlloc.
  partition_alloc_optimized_debug = true

  # PartitionAlloc-Everywhere (PA-E). Causes allocator_shim.cc to route
  # calls to PartitionAlloc, rather than some other platform allocator.
  use_partition_alloc_as_malloc = use_partition_alloc && use_allocator_shim &&
                                  use_partition_alloc_as_malloc_default
}

assert(!use_allocator_shim || (is_android || is_apple || is_chromeos ||
                               is_fuchsia || is_linux || is_win),
       "The allocator shim does not (yet) support the platform.")

if (use_allocator_shim && is_win) {
  # It's hard to override CRT's malloc family in every case in the component
  # build, and it's very easy to override it partially and to be inconsistent
  # among allocations and deallocations. Then, we'll crash when PA deallocates
  # a memory region allocated by the CRT's malloc or vice versa.
  assert(!is_component_build,
         "The allocator shim doesn't work for the component build on Windows.")
}

declare_args() {
  use_freeslot_bitmap = false

  # Puts the regular and BRP pools right next to each other, so that we can
  # check "belongs to one of the two pools" with a single bitmask operation.
  glue_core_pools = false

  # Introduces pointer compression support in PA. These are 4-byte
  # pointers that can point within the core pools (regular and BRP).
  #
  # This is effective only for memory allocated from PartitionAlloc, so it is
  # recommended to enable PA-E above, but isn't strictly necessary. Embedders
  # can create and use PA partitions explicitly.
  enable_pointer_compression_support = false

  # Enables a bounds check when two pointers (at least one being raw_ptr) are
  # subtracted (if supported by the underlying implementation).
  enable_pointer_subtraction_check = false

  # Enables a compile-time check that all raw_ptrs to which arithmetic
  # operations are to be applied are annotated with the AllowPtrArithmetic
  # trait.
  enable_pointer_arithmetic_trait_check = true

  # Forwards all the allocation/freeing calls in the shim (e.g. operator new)
  # through malloc. Useful for using with tools that intercept malloc, e.g.
  # heaptrack.
  forward_through_malloc = false
}

declare_args() {
  # Build support for Use-after-Free protection via BackupRefPtr (BRP),
  # switching the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
  #
  # These are effective only for memory allocated from PartitionAlloc, so it is
  # recommended to enable PA-E above, but isn't strictly necessary. Embedders
  # can create and use PA partitions explicitly.
  #
  # Note that |enable_backup_ref_ptr_support = true| doesn't necessarily enable
  # BRP protection. It'll be enabled only for partitions created with
  # partition_alloc::PartitionOptions::kEnabled.
  enable_backup_ref_ptr_support =
      use_partition_alloc && enable_backup_ref_ptr_support_default

  # The RAW_PTR_EXCLUSION macro is disabled on official builds because it
  # increased binary size. This flag can be used to enable it for official
  # builds too.
  force_enable_raw_ptr_exclusion = false
}

assert(!enable_pointer_compression_support || glue_core_pools,
       "Pointer compression relies on core pools being contiguous.")

declare_args() {
  # Make explicit calls to ASAN at runtime, e.g. to mark quarantined memory
  # as poisoned. Allows ASAN to tell if a particular memory error is protected
  # by BRP in its reports.
  #
  # The implementation of ASan BRP is purpose-built to inspect Chromium
  # internals and is entangled with `//base` s.t. it cannot be used
  # outside of Chromium.
  use_asan_backup_ref_ptr =
      build_with_chromium && is_asan &&
      (is_win || is_android || is_linux || is_mac || is_chromeos)

  # Use probe-on-destruct unowned ptr detection with ASAN.
  use_asan_unowned_ptr = false
}

# Use the version of raw_ptr<T> that allows the embedder to implement custom
# logic.
use_hookable_raw_ptr = use_asan_backup_ref_ptr

declare_args() {
  # - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
  #   are too expensive to have on by default.
  # - enable_dangling_raw_ptr_checks: enable checking that raw_ptrs do not
  #   become dangling during their lifetime.
  # - backup_ref_ptr_poison_oob_ptr: poison out-of-bounds (OOB) pointers to
  #   generate an exception in the event that an OOB pointer is dereferenced.
  # - enable_backup_ref_ptr_instance_tracer: use a global table to track all
  #   live raw_ptr/raw_ref instances to help debug dangling pointers at test
  #   end.

  enable_backup_ref_ptr_slow_checks =
      enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support

  # Enable the feature flag required to activate backup ref pointers. That is to
  # say `PartitionAllocBackupRefPtr`.
  #
  # This is meant to be used primarily on bots. It is much easier to override
  # the feature flags using a binary flag instead of updating multiple bots'
  # scripts to pass command line arguments.
  enable_backup_ref_ptr_feature_flag = false

  # Build support for Dangling Ptr Detection (DPD) via BackupRefPtr (BRP),
  # switching the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
  enable_dangling_raw_ptr_checks =
      enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support

  # Enable the feature flag required to check for dangling pointers. That is to
  # say `PartitionAllocDanglingPtr`.
  #
  # This is meant to be used primarily on bots. It is much easier to override
  # the feature flags using a binary flag instead of updating multiple bots'
  # scripts to pass command line arguments.
  enable_dangling_raw_ptr_feature_flag = false

  enable_backup_ref_ptr_instance_tracer = false

  backup_ref_ptr_extra_oob_checks = enable_backup_ref_ptr_support
}

declare_args() {
  backup_ref_ptr_poison_oob_ptr =
      false && backup_ref_ptr_extra_oob_checks && has_64_bit_pointers
}

declare_args() {
  # Shadow metadata is still under development and only supports Linux
  # for now.
  enable_shadow_metadata = false
}

declare_args() {
  # Use the full MTE protection available by changing the feature flag
  # default values, i.e. sync mode on all processes. Also disables
  # permissive MTE.
  #
  # This is meant to be used primarily on bots. It is much easier to override
  # the feature flags using a binary flag instead of updating multiple bots'
  # scripts to pass command line arguments.
  use_full_mte = false
}

# *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium && has_64_bit_pointers

pcscan_stack_supported =
    use_starscan &&
    (current_cpu == "x64" || current_cpu == "x86" || current_cpu == "arm" ||
     current_cpu == "arm64" || current_cpu == "riscv64")

# We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building
# PartitionAlloc at all. If `use_partition_alloc` is false, we jam all
# related args to `false`.
#
# Do not clear the following, as they can function outside of PartitionAlloc:
# - has_64_bit_pointers
# - has_memory_tagging
if (!use_partition_alloc) {
  use_partition_alloc_as_malloc = false
  enable_backup_ref_ptr_support = false
  use_asan_backup_ref_ptr = false
  use_asan_unowned_ptr = false
  use_hookable_raw_ptr = false
  enable_backup_ref_ptr_slow_checks = false
  enable_dangling_raw_ptr_checks = false
  enable_dangling_raw_ptr_feature_flag = false
  enable_pointer_subtraction_check = false
  backup_ref_ptr_poison_oob_ptr = false
  enable_backup_ref_ptr_instance_tracer = false
  use_starscan = false
  use_full_mte = false
}

# Disable |use_full_mte| if memory tagging is not available. This is for
# targets that run as part of the build process.
if (!has_memory_tagging) {
  use_full_mte = false
}

# enable_backup_ref_ptr_slow_checks can only be used if
# enable_backup_ref_ptr_support is true.
assert(enable_backup_ref_ptr_support || !enable_backup_ref_ptr_slow_checks,
       "Can't enable additional BackupRefPtr checks if it isn't enabled at all")

# enable_dangling_raw_ptr_checks can only be used if
# enable_backup_ref_ptr_support is true.
assert(
    enable_backup_ref_ptr_support || !enable_dangling_raw_ptr_checks,
    "Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")

# It's meaningless to force on DPD (e.g. on bots) if the support isn't compiled
# in.
assert(enable_dangling_raw_ptr_checks || !enable_dangling_raw_ptr_feature_flag,
       "Meaningless to enable DPD without it compiled.")

# To enable extra OOB checks for BackupRefPtr, the underlying feature must be
# enabled, too.
assert(enable_backup_ref_ptr_support || !backup_ref_ptr_extra_oob_checks,
       "Can't enable extra OOB checks if BackupRefPtr isn't enabled at all")

# To poison OOB pointers for BackupRefPtr, the underlying feature must be
# enabled, too.
assert(backup_ref_ptr_extra_oob_checks || !backup_ref_ptr_poison_oob_ptr,
       "Can't enable poisoning for OOB pointers if OOB checks aren't enabled " +
           "at all")
assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr,
       "Can't enable poisoning for OOB pointers if pointers are only 32-bit")

# AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of
# raw_ptr.
assert(
    !use_asan_unowned_ptr || !use_asan_backup_ref_ptr,
    "Both AsanUnownedPtr and AsanBackupRefPtr can't be enabled at the same " +
        "time")

# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
assert(
    !enable_backup_ref_ptr_support || !use_asan_backup_ref_ptr,
    "Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")

# BackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr.
assert(!enable_backup_ref_ptr_support || !use_asan_unowned_ptr,
       "Both BackupRefPtr and AsanUnownedPtr can't be enabled at the same time")

# RawPtrHookableImpl and BackupRefPtr are mutually exclusive variants of
# raw_ptr.
assert(
    !use_hookable_raw_ptr || !enable_backup_ref_ptr_support,
    "Both RawPtrHookableImpl and BackupRefPtr can't be enabled at the same " +
        "time")

# RawPtrHookableImpl and AsanUnownedPtr are mutually exclusive variants of
# raw_ptr.
assert(
    !use_hookable_raw_ptr || !use_asan_unowned_ptr,
    "Both RawPtrHookableImpl and AsanUnownedPtr can't be enabled at the same " +
        "time")

assert(!use_asan_backup_ref_ptr || is_asan,
       "AsanBackupRefPtr requires AddressSanitizer")

assert(!use_asan_unowned_ptr || is_asan,
       "AsanUnownedPtr requires AddressSanitizer")

# AsanBackupRefPtr is not supported outside Chromium. The implementation is
# entangled with `//base`. The code is only physically located with the rest of
# `raw_ptr` to keep it together.
assert(build_with_chromium || !use_asan_backup_ref_ptr,
       "AsanBackupRefPtr is not supported outside Chromium")

assert(!use_asan_backup_ref_ptr || use_hookable_raw_ptr,
       "AsanBackupRefPtr requires RawPtrHookableImpl")

declare_args() {
  # pkeys support is explicitly disabled in all Cronet builds, as some test
  # dependencies that use partition_allocator are compiled in AOSP against a
  # version of glibc that does not include pkeys syscall numbers.
  enable_pkeys =
      (is_linux || is_chromeos) && target_cpu == "x64" && !is_cronet_build
}
assert(!enable_pkeys || ((is_linux || is_chromeos) && target_cpu == "x64"),
       "Pkeys are only supported on x64 Linux and ChromeOS")

# Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when
# constructing, destructing or moving out of a pointer. When using these
# implementations, raw_ptr<>s will always be zeroed, no matter what
# GN args or flags are present.
#
# Other implementations of raw_ptr<>, like NoOpImpl, don't require zeroing
# and do not do so by default. This can lead to subtle bugs when testing
# against one of the zeroing impls and then deploying on a platform that is
# using a non-zeroing implementation. Setting the following GN args to
# true triggers zeroing even for implementations that don't require it.
# This provides consistency with the other impls. This is the recommended
# setting.
#
# Setting these to false will make raw_ptr<> behave more like a raw C++
# pointer `T*`, making NoOpImpl act like an actual no-op, so use it if you're
# worried about the performance of your project. Use at your own risk, as
# it's unsupported and untested within Chromium.
#
# Even when these are set to true, the raw_ptr trait AllowUninitialized
# provides a finer-grained mechanism for opting out of initialization on a
# pointer by pointer basis when using a non-zeroing implementation.
#
# Caveat: _zero_on_move and _on_destruct will prevent the type from being
# trivially copyable; _zero_on_construct and _on_destruct will prevent the
# type from being trivially default constructible.
declare_args() {
  raw_ptr_zero_on_construct = raw_ptr_zero_on_construct_default
  raw_ptr_zero_on_move = raw_ptr_zero_on_move_default
  raw_ptr_zero_on_destruct = raw_ptr_zero_on_destruct_default
}

declare_args() {
  # Assert that PartitionAlloc and MiraclePtr run on C++20 when set to true.
  # Embedders may opt out of using a C++20 build.
  assert_cpp20 = assert_cpp20_default
}
@ -0,0 +1,922 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//build/buildflag_header.gni")
import("//build/config/android/config.gni")
import("//build/config/cast.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/logging.gni")
import("../../partition_alloc.gni")

# Add partition_alloc.gni and import it for partition_alloc configs.

# TODO(https://crbug.com/1467773): Split PartitionAlloc into public and
# private parts. The public config would add the "./include" dir and
# the private config would add the "./src" dir.
# TODO(https://crbug.com/1467773): Move this config and several targets into
# "../..".
config("public_includes") {
  include_dirs = [
    "..",
    "$root_gen_dir/" + rebase_path("..", "//"),
  ]
}

# Enable more warnings that were found when using PartitionAlloc in other
# projects.
#
# This list was initially copied from Dawn, which gathered it from its own
# dependants.
config("dependants_extra_warnings") {
  # Add them only when building PartitionAlloc as part of Chrome, because we
  # control which clang version we use. Otherwise we risk breaking dependants
  # when they use a different clang version.
  #
  # Fuchsia has been excluded from the extra warnings: the dependency on
  # fuchsia.kernel involves too many warnings. This is not a real issue,
  # because the header is only used by PartitionAlloc internally. The
  # dependants do not include it transitively.
  if (build_with_chromium && is_clang && !is_fuchsia) {
    cflags = [
      "-Wc++11-narrowing",
      "-Wconditional-uninitialized",
      "-Wcstring-format-directive",
      "-Wctad-maybe-unsupported",
      "-Wdeprecated-copy",
      "-Wdeprecated-copy-dtor",
      "-Wduplicate-enum",
      "-Wextra-semi",
      "-Wextra-semi-stmt",
      "-Wimplicit-fallthrough",
      "-Winconsistent-missing-destructor-override",
      "-Winvalid-offsetof",
      "-Wmissing-field-initializers",
      "-Wnon-c-typedef-for-linkage",
      "-Wpessimizing-move",
      "-Wrange-loop-analysis",
      "-Wredundant-move",
      "-Wshadow-field",
      "-Wstrict-prototypes",
      "-Wsuggest-destructor-override",
      "-Wsuggest-override",
      "-Wtautological-unsigned-zero-compare",
      "-Wunreachable-code-aggressive",
      "-Wunused-but-set-variable",
      "-Wunused-macros",
    ]

    # clang-cl doesn't know -pedantic; pass it explicitly to the clang driver.
    if (is_win) {
      cflags += [ "/clang:-pedantic" ]
    } else {
      cflags += [ "-pedantic" ]
    }
  }
}

_remove_configs = []
_add_configs = []
if (!is_debug || partition_alloc_optimized_debug) {
  _remove_configs += [ "//build/config/compiler:default_optimization" ]

  # PartitionAlloc is relatively hot (>1% of cycles for users of CrOS).
  # Use speed-focused optimizations for it.
  _add_configs += [ "//build/config/compiler:optimize_speed" ]
} else {
  _remove_configs += [ "//build/config/compiler:default_optimization" ]
  _add_configs += [ "//build/config/compiler:no_optimize" ]
}

component("raw_ptr") {
  # `gn check` is unhappy with most `#includes` when PA isn't
  # actually built.
  check_includes = use_partition_alloc
  public = [
    "pointers/instance_tracer.h",
    "pointers/raw_ptr.h",
    "pointers/raw_ptr_cast.h",
    "pointers/raw_ptr_exclusion.h",
    "pointers/raw_ptr_noop_impl.h",
    "pointers/raw_ref.h",
  ]
  sources = [ "pointers/instance_tracer.cc" ]
  public_configs = [ ":public_includes" ]
  configs += [ "//build/config/compiler:wexit_time_destructors" ]

  if (enable_backup_ref_ptr_support) {
    sources += [
      "pointers/raw_ptr_backup_ref_impl.cc",
      "pointers/raw_ptr_backup_ref_impl.h",
    ]
  } else if (use_hookable_raw_ptr) {
    sources += [
      "pointers/raw_ptr_hookable_impl.cc",
      "pointers/raw_ptr_hookable_impl.h",
    ]
  } else if (use_asan_unowned_ptr) {
    sources += [
      "pointers/raw_ptr_asan_unowned_impl.cc",
      "pointers/raw_ptr_asan_unowned_impl.h",
    ]
  } else {
    sources += [ "pointers/raw_ptr_noop_impl.h" ]
    sources += [ "pointers/empty.cc" ]
  }
  if (use_partition_alloc) {
    public_deps = [ ":partition_alloc" ]
  }
  deps = [ ":buildflags" ]

  # See also: `partition_alloc_base/component_export.h`
  defines = [ "IS_RAW_PTR_IMPL" ]

  configs -= _remove_configs
  configs += _add_configs
  configs += [ ":dependants_extra_warnings" ]
}

# Changes the freelist implementation to use pointer offsets in lieu
# of full-on pointers. Defaults to false, which implies the use of the
# "encoded next" freelist entry.
#
# Only usable when pointers are 64-bit.
use_freelist_pool_offsets = has_64_bit_pointers && false

buildflag_header("partition_alloc_buildflags") {
  header = "partition_alloc_buildflags.h"

  _record_alloc_info = false

  # GWP-ASan is tied to BRP's enablement.
  _enable_gwp_asan_support = enable_backup_ref_ptr_support

  # Pools are a logical concept when address space is 32-bit.
  _glue_core_pools = glue_core_pools && has_64_bit_pointers

  # Pointer compression requires 64-bit pointers.
  _enable_pointer_compression =
      enable_pointer_compression_support && has_64_bit_pointers

  # TODO(crbug.com/1151236): Need to refactor the following buildflags.
  # The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
  # PartitionAlloc. For PartitionAlloc,
  # gen/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_buildflags.h
  # defines them and PartitionAlloc includes the header file. For chrome,
  # gen/base/allocator/buildflags.h defines them and chrome includes it.
  flags = [
    "HAS_64_BIT_POINTERS=$has_64_bit_pointers",
    "HAS_MEMORY_TAGGING=$has_memory_tagging",

    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
    "USE_LARGE_EMPTY_SLOT_SPAN_RING=$use_large_empty_slot_span_ring",
    "USE_PARTITION_ALLOC=$use_partition_alloc",
    "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",

    "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
    "ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
    "ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER=$enable_backup_ref_ptr_instance_tracer",
    "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
    "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
    "ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
    "ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
    "BACKUP_REF_PTR_EXTRA_OOB_CHECKS=$backup_ref_ptr_extra_oob_checks",
    "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
    "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
    "USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr",
    "USE_HOOKABLE_RAW_PTR=$use_hookable_raw_ptr",
    "ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",

    "FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",

    "USE_FULL_MTE=$use_full_mte",

    "RECORD_ALLOC_INFO=$_record_alloc_info",
    "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
    "GLUE_CORE_POOLS=$_glue_core_pools",
    "ENABLE_POINTER_COMPRESSION=$_enable_pointer_compression",
    "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
    "USE_FREELIST_POOL_OFFSETS=$use_freelist_pool_offsets",

    "USE_STARSCAN=$use_starscan",
    "PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",

    "ENABLE_PKEYS=$enable_pkeys",
    "ENABLE_THREAD_ISOLATION=$enable_pkeys",

    "FORWARD_THROUGH_MALLOC=$forward_through_malloc",
    "ASSERT_CPP_20=$assert_cpp20",
  ]
}

buildflag_header("raw_ptr_buildflags") {
  header = "raw_ptr_buildflags.h"

  flags = [
    "RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
    "RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
    "RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
  ]
}

buildflag_header("chromecast_buildflags") {
  header = "chromecast_buildflags.h"

  flags = [
    "PA_IS_CAST_ANDROID=$is_cast_android",
    "PA_IS_CASTOS=$is_castos",
  ]
}

buildflag_header("chromeos_buildflags") {
  header = "chromeos_buildflags.h"

  flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
}

buildflag_header("debugging_buildflags") {
  header = "debugging_buildflags.h"
  header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"

  # Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
  # but avails it as a buildflag.
  _dcheck_is_on = is_debug || dcheck_always_on

  flags = [
    "PA_DCHECK_IS_ON=$_dcheck_is_on",
    "PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
    "PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
    "PA_CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
  ]
}

group("buildflags") {
  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":partition_alloc_buildflags",
    ":raw_ptr_buildflags",
  ]
  public_configs = [ ":public_includes" ]
}

if (is_clang_or_gcc) {
  config("partition_alloc_implementation") {
    # See also: `partition_alloc_base/component_export.h`
    defines = [ "IS_PARTITION_ALLOC_IMPL" ]
  }

  config("partition_alloc_base_implementation") {
    # See also: `partition_alloc_base/component_export.h`
    defines = [ "IS_PARTITION_ALLOC_BASE_IMPL" ]
  }

  config("allocator_shim_implementation") {
    # See also: `partition_alloc_base/component_export.h`
    defines = [ "IS_ALLOCATOR_SHIM_IMPL" ]
  }

  config("memory_tagging") {
    if (current_cpu == "arm64" &&
        (is_linux || is_chromeos || is_android || is_fuchsia)) {
      # base/ has access to the MTE intrinsics because it needs to use them,
      # but they're not backwards compatible. Use base::CPU::has_mte()
      # beforehand to confirm, or use indirect functions (ifuncs) to select
      # an MTE-specific implementation at dynamic link-time.
      cflags = [
        "-Xclang",
        "-target-feature",
        "-Xclang",
        "+mte",
      ]
    }
  }

  # Used to shim malloc symbols on Android. See //base/allocator/README.md.
  config("wrap_malloc_symbols") {
    ldflags = [
      "-Wl,-wrap,calloc",
      "-Wl,-wrap,free",
      "-Wl,-wrap,malloc",
      "-Wl,-wrap,memalign",
      "-Wl,-wrap,posix_memalign",
      "-Wl,-wrap,pvalloc",
      "-Wl,-wrap,realloc",
      "-Wl,-wrap,valloc",

      # Not allocating memory, but part of the API
      "-Wl,-wrap,malloc_usable_size",

      # <cstdlib> functions
      "-Wl,-wrap,realpath",

      # <string.h> functions
      "-Wl,-wrap,strdup",
      "-Wl,-wrap,strndup",

      # <unistd.h> functions
      "-Wl,-wrap,getcwd",

      # <cstdio> functions
      "-Wl,-wrap,asprintf",
      "-Wl,-wrap,vasprintf",
    ]
  }

  config("mac_no_default_new_delete_symbols") {
    if (!is_component_build) {
      # This is already set when we compile libc++, see
      # buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as
      # well, since the shim defines the symbols, to prevent them being
      # exported.
      cflags = [ "-fvisibility-global-new-delete=force-hidden" ]
    }
  }

  if (is_fuchsia) {
    config("fuchsia_sync_lib") {
      libs = [
        "sync",  # Used by spinning_mutex.h.
      ]
    }
  }

  if (enable_pkeys && is_debug) {
    config("no_stack_protector") {
      cflags = [ "-fno-stack-protector" ]
    }
  }
||||||
|
|
||||||
|
group("partition_alloc") {
|
||||||
|
public_deps = [
|
||||||
|
":allocator_base",
|
||||||
|
":allocator_core",
|
||||||
|
":allocator_shim",
|
||||||
|
]
|
||||||
|
}

component("allocator_core") {
  visibility = [ ":*" ]

  sources = [
    "address_pool_manager.cc",
    "address_pool_manager.h",
    "address_pool_manager_bitmap.cc",
    "address_pool_manager_bitmap.h",
    "address_pool_manager_types.h",
    "address_space_randomization.cc",
    "address_space_randomization.h",
    "address_space_stats.h",
    "allocation_guard.cc",
    "allocation_guard.h",
    "compressed_pointer.cc",
    "compressed_pointer.h",
    "dangling_raw_ptr_checks.cc",
    "dangling_raw_ptr_checks.h",
    "flags.h",
    "freeslot_bitmap.h",
    "freeslot_bitmap_constants.h",
    "gwp_asan_support.cc",
    "gwp_asan_support.h",
    "in_slot_metadata.h",
    "internal_allocator.cc",
    "internal_allocator.h",
    "internal_allocator_forward.h",
    "lightweight_quarantine.cc",
    "lightweight_quarantine.h",
    "memory_reclaimer.cc",
    "memory_reclaimer.h",
    "oom.cc",
    "oom.h",
    "oom_callback.cc",
    "oom_callback.h",
    "page_allocator.cc",
    "page_allocator.h",
    "page_allocator_constants.h",
    "page_allocator_internal.h",
    "partition_address_space.cc",
    "partition_address_space.h",
    "partition_alloc-inl.h",
    "partition_alloc.cc",
    "partition_alloc.h",
    "partition_alloc_allocation_data.h",
    "partition_alloc_check.h",
    "partition_alloc_config.h",
    "partition_alloc_constants.h",
    "partition_alloc_forward.h",
    "partition_alloc_hooks.cc",
    "partition_alloc_hooks.h",
    "partition_bucket.cc",
    "partition_bucket.h",
    "partition_bucket_lookup.h",
    "partition_cookie.h",
    "partition_dcheck_helper.cc",
    "partition_dcheck_helper.h",
    "partition_direct_map_extent.h",
    "partition_freelist_entry.cc",
    "partition_freelist_entry.h",
    "partition_lock.h",
    "partition_oom.cc",
    "partition_oom.h",
    "partition_page.cc",
    "partition_page.h",
    "partition_page_constants.h",
    "partition_root.cc",
    "partition_root.h",
    "partition_stats.cc",
    "partition_stats.h",
    "partition_superpage_extent_entry.h",
    "partition_tls.h",
    "random.cc",
    "random.h",
    "reservation_offset_table.cc",
    "reservation_offset_table.h",
    "reverse_bytes.h",
    "spinning_mutex.cc",
    "spinning_mutex.h",
    "tagging.cc",
    "tagging.h",
    "thread_cache.cc",
    "thread_cache.h",
    "thread_isolation/alignment.h",
    "thread_isolation/pkey.cc",
    "thread_isolation/pkey.h",
    "thread_isolation/thread_isolation.cc",
    "thread_isolation/thread_isolation.h",
    "yield_processor.h",
  ]

  if (use_starscan) {
    sources += [
      "starscan/logging.h",
      "starscan/pcscan.cc",
      "starscan/pcscan.h",
      "starscan/pcscan_internal.cc",
      "starscan/pcscan_internal.h",
      "starscan/pcscan_scheduling.cc",
      "starscan/pcscan_scheduling.h",
      "starscan/raceful_worklist.h",
      "starscan/scan_loop.h",
      "starscan/snapshot.cc",
      "starscan/snapshot.h",
      "starscan/stack/stack.cc",
      "starscan/stack/stack.h",
      "starscan/starscan_fwd.h",
      "starscan/state_bitmap.h",
      "starscan/stats_collector.cc",
      "starscan/stats_collector.h",
      "starscan/stats_reporter.h",
      "starscan/write_protector.cc",
      "starscan/write_protector.h",
    ]
  }

  defines = []
  if (is_win) {
    sources += [
      "page_allocator_internals_win.h",
      "partition_tls_win.cc",
    ]
  } else if (is_posix) {
    sources += [
      "page_allocator_internals_posix.cc",
      "page_allocator_internals_posix.h",
    ]
  } else if (is_fuchsia) {
    sources += [ "page_allocator_internals_fuchsia.h" ]
  }
  if (is_android) {
    # The Android NDK supports PR_MTE_* macros as of NDK r23.
    defines += [ "HAS_PR_MTE_MACROS" ]
  }
  if (use_starscan) {
    if (current_cpu == "x64") {
      assert(pcscan_stack_supported)
      sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
    } else if (current_cpu == "x86") {
      assert(pcscan_stack_supported)
      sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
    } else if (current_cpu == "arm") {
      assert(pcscan_stack_supported)
      sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
    } else if (current_cpu == "arm64") {
      assert(pcscan_stack_supported)
      sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
    } else if (current_cpu == "riscv64") {
      assert(pcscan_stack_supported)
      sources += [ "starscan/stack/asm/riscv64/push_registers_asm.cc" ]
    } else {
      # To support a trampoline for another arch, please refer to
      # v8/src/heap/base.
      assert(!pcscan_stack_supported)
    }
  }
  if (use_freelist_pool_offsets) {
    sources += [ "pool_offset_freelist.h" ]
  } else {
    sources += [ "encoded_next_freelist.h" ]
  }

  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":partition_alloc_buildflags",
  ]

  configs += [
    ":partition_alloc_implementation",
    ":memory_tagging",
    "//build/config/compiler:wexit_time_destructors",
  ]
  deps = [ ":allocator_base" ]
  public_configs = []
  if (is_android) {
    # tagging.cc requires the __arm_mte_set_* functions.
    deps += [ "//third_party/cpu_features:ndk_compat" ]
  }
  if (is_fuchsia) {
    deps += [
      "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp",
      "//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
    ]
    public_deps += [
      "//third_party/fuchsia-sdk/sdk/pkg/sync",
      "//third_party/fuchsia-sdk/sdk/pkg/zx",
    ]

    # Needed for users of spinning_mutex.h, which, for performance reasons,
    # contains inlined calls to `libsync` inside the header file.
    # It appends an entry to the "libs" section of the dependent target.
    public_configs += [ ":fuchsia_sync_lib" ]
  }

  frameworks = []
  if (is_mac) {
    # SecTaskGetCodeSignStatus needs:
    frameworks += [ "Security.framework" ]
  }

  if (is_apple) {
    frameworks += [
      "CoreFoundation.framework",
      "Foundation.framework",
    ]
  }

  configs -= _remove_configs
  configs += _add_configs
  configs += [ ":dependants_extra_warnings" ]

  # We want to be able to test pkey mode without access to the default pkey.
  # This is incompatible with stack protectors, since the TLS won't be
  # pkey-tagged.
  if (enable_pkeys && is_debug) {
    configs += [ ":no_stack_protector" ]
  }
}

component("allocator_base") {
  visibility = [ ":*" ]

  sources = [
    "partition_alloc_base/atomic_ref_count.h",
    "partition_alloc_base/augmentations/compiler_specific.h",
    "partition_alloc_base/bit_cast.h",
    "partition_alloc_base/bits.h",
    "partition_alloc_base/check.cc",
    "partition_alloc_base/check.h",
    "partition_alloc_base/compiler_specific.h",
    "partition_alloc_base/component_export.h",
    "partition_alloc_base/cpu.cc",
    "partition_alloc_base/cpu.h",
    "partition_alloc_base/cxx20_is_constant_evaluated.h",
    "partition_alloc_base/debug/alias.cc",
    "partition_alloc_base/debug/alias.h",
    "partition_alloc_base/debug/stack_trace.cc",
    "partition_alloc_base/debug/stack_trace.h",
    "partition_alloc_base/export_template.h",
    "partition_alloc_base/immediate_crash.h",
    "partition_alloc_base/log_message.cc",
    "partition_alloc_base/log_message.h",
    "partition_alloc_base/logging.cc",
    "partition_alloc_base/logging.h",
    "partition_alloc_base/memory/page_size.h",
    "partition_alloc_base/memory/ref_counted.cc",
    "partition_alloc_base/memory/ref_counted.h",
    "partition_alloc_base/memory/scoped_policy.h",
    "partition_alloc_base/memory/scoped_refptr.h",
    "partition_alloc_base/no_destructor.h",
    "partition_alloc_base/notreached.h",
    "partition_alloc_base/numerics/checked_math.h",
    "partition_alloc_base/numerics/checked_math_impl.h",
    "partition_alloc_base/numerics/clamped_math.h",
    "partition_alloc_base/numerics/clamped_math_impl.h",
    "partition_alloc_base/numerics/safe_conversions.h",
    "partition_alloc_base/numerics/safe_conversions_arm_impl.h",
    "partition_alloc_base/numerics/safe_conversions_impl.h",
    "partition_alloc_base/numerics/safe_math.h",
    "partition_alloc_base/numerics/safe_math_arm_impl.h",
    "partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
    "partition_alloc_base/numerics/safe_math_shared_impl.h",
    "partition_alloc_base/posix/eintr_wrapper.h",
    "partition_alloc_base/process/process_handle.h",
    "partition_alloc_base/rand_util.cc",
    "partition_alloc_base/rand_util.h",
    "partition_alloc_base/scoped_clear_last_error.h",
    "partition_alloc_base/strings/cstring_builder.cc",
    "partition_alloc_base/strings/cstring_builder.h",
    "partition_alloc_base/strings/safe_sprintf.cc",
    "partition_alloc_base/strings/safe_sprintf.h",
    "partition_alloc_base/strings/string_util.cc",
    "partition_alloc_base/strings/string_util.h",
    "partition_alloc_base/strings/stringprintf.cc",
    "partition_alloc_base/strings/stringprintf.h",
    "partition_alloc_base/system/sys_info.h",
    "partition_alloc_base/thread_annotations.h",
    "partition_alloc_base/threading/platform_thread.cc",
    "partition_alloc_base/threading/platform_thread.h",
    "partition_alloc_base/threading/platform_thread_ref.h",
    "partition_alloc_base/time/time.cc",
    "partition_alloc_base/time/time.h",
    "partition_alloc_base/time/time_override.cc",
    "partition_alloc_base/time/time_override.h",
    "partition_alloc_base/types/strong_alias.h",
    "partition_alloc_base/win/win_handle_types.h",
    "partition_alloc_base/win/win_handle_types_list.inc",
    "partition_alloc_base/win/windows_types.h",
  ]

  if (is_win) {
    sources += [
      "partition_alloc_base/debug/stack_trace_win.cc",
      "partition_alloc_base/memory/page_size_win.cc",
      "partition_alloc_base/process/process_handle_win.cc",
      "partition_alloc_base/rand_util_win.cc",
      "partition_alloc_base/scoped_clear_last_error_win.cc",
      "partition_alloc_base/threading/platform_thread_win.cc",
      "partition_alloc_base/time/time_win.cc",
    ]
  } else if (is_posix) {
    sources += [
      "partition_alloc_base/debug/stack_trace_posix.cc",
      "partition_alloc_base/files/file_util.h",
      "partition_alloc_base/files/file_util_posix.cc",
      "partition_alloc_base/memory/page_size_posix.cc",
      "partition_alloc_base/posix/safe_strerror.cc",
      "partition_alloc_base/posix/safe_strerror.h",
      "partition_alloc_base/process/process_handle_posix.cc",
      "partition_alloc_base/rand_util_posix.cc",
      "partition_alloc_base/threading/platform_thread_internal_posix.h",
      "partition_alloc_base/threading/platform_thread_posix.cc",
      "partition_alloc_base/time/time_conversion_posix.cc",
    ]

    if (is_linux || is_chromeos) {
      sources += [ "partition_alloc_base/debug/stack_trace_linux.cc" ]
    }

    if (is_android || is_chromeos_ash) {
      sources += [ "partition_alloc_base/time/time_android.cc" ]
    }
    if (is_apple) {
      # Request <dlfcn.h> to provide the `dladdr()` function. This is used to
      # translate an address to symbolic information.
      defines = [ "HAVE_DLADDR" ]

      sources += [
        "partition_alloc_base/debug/stack_trace_mac.cc",
        "partition_alloc_base/time/time_apple.mm",
      ]
    } else {
      sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
    }
  } else if (is_fuchsia) {
    sources += [
      "partition_alloc_base/fuchsia/fuchsia_logging.cc",
      "partition_alloc_base/fuchsia/fuchsia_logging.h",
      "partition_alloc_base/memory/page_size_posix.cc",
      "partition_alloc_base/posix/safe_strerror.cc",
      "partition_alloc_base/posix/safe_strerror.h",
      "partition_alloc_base/rand_util_fuchsia.cc",
      "partition_alloc_base/threading/platform_thread_internal_posix.h",
      "partition_alloc_base/threading/platform_thread_posix.cc",
      "partition_alloc_base/time/time_conversion_posix.cc",
      "partition_alloc_base/time/time_fuchsia.cc",
    ]
  }
  if (is_android) {
    # Only the Android build requires native_library, and native_library
    # depends on file_path, so file_path is added only when is_android is
    # true.
    sources += [
      "partition_alloc_base/debug/stack_trace_android.cc",
      "partition_alloc_base/files/file_path.cc",
      "partition_alloc_base/files/file_path.h",
      "partition_alloc_base/native_library.cc",
      "partition_alloc_base/native_library.h",
      "partition_alloc_base/native_library_posix.cc",
    ]
  }
  if (is_apple) {
    # Apple-specific utilities
    sources += [
      "partition_alloc_base/apple/foundation_util.h",
      "partition_alloc_base/apple/foundation_util.mm",
      "partition_alloc_base/apple/mach_logging.cc",
      "partition_alloc_base/apple/mach_logging.h",
      "partition_alloc_base/apple/scoped_cftyperef.h",
      "partition_alloc_base/apple/scoped_typeref.h",
    ]
    if (is_ios) {
      sources += [
        "partition_alloc_base/ios/ios_util.h",
        "partition_alloc_base/ios/ios_util.mm",
        "partition_alloc_base/system/sys_info_ios.mm",
      ]
    }
    if (is_mac) {
      sources += [
        "partition_alloc_base/mac/mac_util.h",
        "partition_alloc_base/mac/mac_util.mm",
        "partition_alloc_base/system/sys_info_mac.mm",
      ]
    }
  }

  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":partition_alloc_buildflags",
  ]
  public_configs = [ ":public_includes" ]
  configs += [
    ":partition_alloc_base_implementation",
    "//build/config/compiler:wexit_time_destructors",
  ]

  deps = []
  if (is_fuchsia) {
    public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/fit" ]
  }

  frameworks = []
  if (is_apple) {
    frameworks += [
      "CoreFoundation.framework",
      "Foundation.framework",
    ]
  }

  configs -= _remove_configs
  configs += _add_configs
  configs += [ ":dependants_extra_warnings" ]
}

component("allocator_shim") {
  visibility = [ ":*" ]

  sources = []
  deps = []
  all_dependent_configs = []
  public_configs = [ ":public_includes" ]
  configs += [
    ":allocator_shim_implementation",
    "//build/config/compiler:wexit_time_destructors",
  ]
  frameworks = []

  configs -= _remove_configs
  configs += _add_configs
  configs += [ ":dependants_extra_warnings" ]

  shim_headers = []
  shim_sources = []

  shim_sources += [
    "shim/allocator_shim.cc",
    "shim/allocator_shim_dispatch_to_noop_on_free.cc",
  ]
  shim_headers += [
    "shim/allocator_shim.h",
    "shim/allocator_shim_internals.h",
    "shim/shim_alloc_functions.h",
    "shim/allocator_shim_functions.h",
    "shim/allocator_dispatch.h",
    "shim/allocator_shim_dispatch_to_noop_on_free.h",
  ]
  if (use_partition_alloc) {
    shim_sources += [
      "shim/allocator_shim_default_dispatch_to_partition_alloc.cc",
      "shim/nonscannable_allocator.cc",
    ]
    shim_headers += [
      "shim/allocator_shim_default_dispatch_to_partition_alloc.h",
      "shim/nonscannable_allocator.h",
    ]
  }
  if (is_android) {
    shim_headers += [
      "shim/allocator_shim_override_cpp_symbols.h",
      "shim/allocator_shim_override_linker_wrapped_symbols.h",
    ]
    shim_sources -= [ "shim/allocator_shim.cc" ]
    shim_sources += [ "shim/allocator_shim_android.cc" ]
    if (use_allocator_shim) {
      all_dependent_configs += [ ":wrap_malloc_symbols" ]
    }
  }
  if (is_apple) {
    shim_headers += [
      "shim/allocator_shim_override_apple_default_zone.h",
      "shim/allocator_shim_override_apple_symbols.h",
      "shim/early_zone_registration_constants.h",
      "shim/allocator_interception_apple.h",
      "shim/malloc_zone_functions_apple.h",
    ]
    shim_sources -= [ "shim/allocator_shim.cc" ]
    shim_sources += [
      "shim/allocator_interception_apple.mm",
      "shim/allocator_shim_apple.cc",
      "shim/malloc_zone_functions_apple.cc",
    ]
    frameworks += [ "CoreFoundation.framework" ]

    if (use_allocator_shim) {
      configs += [ ":mac_no_default_new_delete_symbols" ]

      # Do not compile with ARC because this target has to interface with
      # low-level Objective-C, and having ARC would interfere.
      configs -= [ "//build/config/compiler:enable_arc" ]
    }
  }
  if (is_chromeos || is_linux) {
    shim_headers += [
      "shim/allocator_shim_override_cpp_symbols.h",
      "shim/allocator_shim_override_glibc_weak_symbols.h",
      "shim/allocator_shim_override_libc_symbols.h",
    ]
  }
  if (is_win) {
    shim_headers += [
      "shim/allocator_shim_override_ucrt_symbols_win.h",
      "shim/winheap_stubs_win.h",
    ]
    shim_sources -= [ "shim/allocator_shim.cc" ]
    shim_sources += [ "shim/winheap_stubs_win.cc" ]

    if (!is_component_build) {
      shim_sources += [
        "shim/allocator_shim_win_static.cc",
        "shim/checked_multiply_win.h",
      ]
    } else {
      shim_sources += [
        "shim/allocator_shim_functions_win_component.cc",
        "shim/allocator_shim_win_component.cc",
      ]

      # allocator_shim cannot depend on libc++ objects because they use
      # malloc() internally.
      no_default_deps = true
    }
  }

  if (!use_partition_alloc_as_malloc) {
    if (is_android) {
      shim_sources += [
        "shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
      ]
    }
    if (is_apple) {
      shim_sources +=
          [ "shim/allocator_shim_default_dispatch_to_apple_zoned_malloc.cc" ]
    }
    if (is_chromeos || is_linux) {
      shim_sources += [ "shim/allocator_shim_default_dispatch_to_glibc.cc" ]
    }
    if (is_win) {
      shim_sources += [ "shim/allocator_shim_default_dispatch_to_winheap.cc" ]
    }
  }

  sources = shim_headers
  if (use_allocator_shim) {
    sources += shim_sources
  } else {
    # To avoid "lld-link: error: <root>: undefined symbol: _DllMainCRTStartup",
    # at least one object file is required when linking allocator_shim.dll.
    sources += [ "shim/empty.cc" ]
  }

  deps += [
    ":allocator_base",
    ":allocator_core",
    ":buildflags",
  ]
}
}  # if (is_clang_or_gcc)

# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here, i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").
@ -0,0 +1,571 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_pool_manager.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/thread_isolation/alignment.h"

#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

AddressPoolManager AddressPoolManager::singleton_;

// static
AddressPoolManager& AddressPoolManager::GetInstance() {
  return singleton_;
}

namespace {
// Allocations are all performed on behalf of PartitionAlloc.
constexpr PageTag kPageTag = PageTag::kPartitionAlloc;

}  // namespace

#if BUILDFLAG(HAS_64_BIT_POINTERS)

namespace {

// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
  // Callers rely on the pages being zero-initialized when recommitting them.
  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
  // particular on macOS, but |DecommitAndZeroSystemPages| does.
  DecommitAndZeroSystemPages(address, size, kPageTag);
}

}  // namespace

void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
  PA_CHECK(handle > 0 && handle <= std::size(pools_));

  Pool* pool = GetPool(handle);
  PA_CHECK(!pool->IsInitialized());
  pool->Initialize(ptr, length);
}

void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPagesInPool>& used) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return;
  }

  pool->GetUsedSuperPages(used);
}

uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return 0;
  }

  return pool->GetBaseAddress();
}

void AddressPoolManager::ResetForTesting() {
  for (size_t i = 0; i < std::size(pools_); ++i) {
    pools_[i].Reset();
  }
}

void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}

uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  Pool* pool = GetPool(handle);
  if (!requested_address) {
    return pool->FindChunk(length);
  }
  const bool is_available = pool->TryReserveChunk(requested_address, length);
  if (is_available) {
    return requested_address;
  }
  return pool->FindChunk(length);
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  DecommitPages(address, length);
  pool->FreeChunk(address, length);
}

void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);

  ScopedGuard scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}

bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}

void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}

void AddressPoolManager::Pool::GetUsedSuperPages(
    std::bitset<kMaxSuperPagesInPool>& used) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(IsInitialized());
  used = alloc_bitset_;
}

uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
  PA_DCHECK(IsInitialized());
  return address_begin_;
}

uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size >> kSuperPageShift;

  // Use a first-fit policy to find an available chunk among the free chunks.
  // Start from |bit_hint_|, because we know there are no free chunks before
  // it.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_) {
      return 0;
    }

    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Set |found=false|
        // to ensure the outer loop continues. However, continue the inner loop
        // to set |beg_bit| just past the last set bit in the investigated
        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
        // next outer loop pass from checking the same bits.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit) {
          ++bit_hint_;
        }
      }
    }

    // An entire [beg_bit; end_bit) region of 0s was found. Fill them with 1s
    // (to mark as allocated) and return the allocated address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if BUILDFLAG(PA_DCHECK_IS_ON)
      PA_DCHECK(address + requested_size <= address_end_);
#endif
      return address;
    }
  }

  PA_NOTREACHED();
}
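
// Worked example (illustrative): suppose total_bits_ = 8, need_bits = 2, and
// the bitset reads 1 1 0 1 0 0 1 0 (bit 0 leftmost, 1 = allocated). The scan
// skips bits 0-1, rejects the lone free bit at index 2 (bit 3 is set), and
// accepts the run [4, 6), returning address_begin_ + 4 * kSuperPageSize.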

bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  ScopedGuard scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
  const size_t need_bits = requested_size / kSuperPageSize;
  const size_t end_bit = begin_bit + need_bits;
  // Check that the requested address is not too high.
  if (end_bit > total_bits_) {
    return false;
  }
  // Check if any bit of the requested region is set already.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    if (alloc_bitset_.test(i)) {
      return false;
    }
  }
  // Otherwise, set the bits.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    alloc_bitset_.set(i);
  }
  return true;
}

void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));

  PA_DCHECK(address_begin_ <= address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(address + free_size <= address_end_);
#endif

  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}

void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
  std::bitset<kMaxSuperPagesInPool> pages;
  size_t i;
  {
    ScopedGuard scoped_lock(lock_);
    pages = alloc_bitset_;
    i = bit_hint_;
  }

  stats->usage = pages.count();

  size_t largest_run = 0;
  size_t current_run = 0;
  for (; i < total_bits_; ++i) {
    if (!pages[i]) {
      current_run += 1;
      continue;
    } else if (current_run > largest_run) {
      largest_run = current_run;
    }
    current_run = 0;
  }

  // Fell out of the loop with the last bit being zero. Check once more.
  if (current_run > largest_run) {
    largest_run = current_run;
  }
  stats->largest_available_reservation = largest_run;
}

void AddressPoolManager::GetPoolStats(const pool_handle handle,
                                      PoolStats* stats) {
  Pool* pool = GetPool(handle);
  if (!pool->IsInitialized()) {
    return;
  }
  pool->GetStats(stats);
}

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  // Get 64-bit pool stats.
  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (IsConfigurablePoolAvailable()) {
    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
  }
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
#endif
  return true;
}

#else  // BUILDFLAG(HAS_64_BIT_POINTERS)

static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");

template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(!bitmap.test(i));
    bitmap.set(i);
  }
}

template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(bitmap.test(i));
    bitmap.reset(i);
  }
}

uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  uintptr_t address =
      AllocPages(requested_address, length, kSuperPageSize,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 kPageTag);
  return address;
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(address, length);
}

void AddressPoolManager::MarkUsed(pool_handle handle,
                                  uintptr_t address,
                                  size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used internally by the
    // allocator and is not available for callers.
    //
    // This is required to avoid a crash caused by the following code:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByBRPPool(ptr) were to return true, ~raw_ptr<T>() would
    // crash, since the memory is not allocated by PartitionAlloc.
    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}
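
// Worked numbers (illustrative, assuming 16 KiB partition pages, i.e. one BRP
// bitmap bit per 16 KiB): marking a 64 KiB region used sets
// (64 KiB / 16 KiB) - kGuardBitsOfBRPPoolBitmap = 4 - 2 = 2 bits, starting
// kGuardOffsetOfBRPPoolBitmap = 1 bit past the region's first bit, so the
// guard blocks at both ends stay outside the BRP pool.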

void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  // Address regions allocated for normal buckets are never released, so this
  // function can only be called for direct map. However, do not DCHECK on
  // IsManagedByDirectMap(address), because many tests exercise this function
  // using small allocations.

  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, the BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given.
    // (See the MarkUsed comment.)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    ResetBitmap(
        AddressPoolManagerBitmap::regular_pool_bits_,
        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

namespace {

// Counts super pages in use represented by `bitmap`.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  size_t bit_index = 0;

  // Stride over super pages.
  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
    // Stride over the bits comprising the super page.
    for (bit_index = super_page_index * bits_per_super_page;
         bit_index < (super_page_index + 1) * bits_per_super_page &&
         bit_index < bitsize;
         ++bit_index) {
      if (bitmap[bit_index]) {
        count += 1;
        // Move on to the next super page.
        break;
      }
    }
  }
  return count;
}
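
// Example (illustrative): with bits_per_super_page = 4 and bitmap bits
// 0010 1100 0000, super page 0 counts (bit 2 is set), super page 1 counts
// (bits 4 and 5 are set, counted once thanks to the break), and super page 2
// does not, so the function returns 2.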

}  // namespace

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // Pool usage is read out from the address pool bitmaps.
  // The output stats are sized in super pages, so we translate the bitmaps
  // into super-page usage.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  // Get 32-bit pool usage.
  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Get blocklist size.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed)) {
      stats->blocklist_size += 1;
    }
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  return true;
}

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats)) {
    dumper->DumpStats(&stats);
  }
}

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool.
void AddressPoolManager::AssertThreadIsolatedLayout() {
  constexpr size_t last_pool_offset =
      offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1);
  constexpr size_t alloc_bitset_offset =
      last_pool_offset + offsetof(Pool, alloc_bitset_);
  static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
  static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0);
}
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

}  // namespace partition_alloc::internal
@ -0,0 +1,209 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_

#include <bitset>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager_types.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"
#include "partition_alloc/thread_isolation/alignment.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h"
#endif

namespace partition_alloc {

class AddressSpaceStatsDumper;
struct AddressSpaceStats;
struct PoolStats;

}  // namespace partition_alloc

namespace partition_alloc::internal {

// (64-bit version)
// AddressPoolManager takes a reserved virtual address space and manages
// address space allocation.
//
// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32-bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the
// bitmaps to judge whether a given address is in a pool that supports
// BackupRefPtr or in a pool that doesn't. All PartitionAlloc allocations must
// be in either of the pools.
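//
// Minimal usage sketch (illustrative; the handle and sizes are assumptions,
// and on 64-bit builds the pool must have been Add()ed first):
//
//   AddressPoolManager& mgr = AddressPoolManager::GetInstance();
//   uintptr_t base = mgr.Reserve(kRegularPoolHandle, 0, 2 * kSuperPageSize);
//   if (base) {
//     // ... commit and use the region ...
//     mgr.UnreserveAndDecommit(kRegularPoolHandle, base, 2 * kSuperPageSize);
//   }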
class PA_COMPONENT_EXPORT(PARTITION_ALLOC)
    PA_THREAD_ISOLATED_ALIGN AddressPoolManager {
 public:
  static AddressPoolManager& GetInstance();

  AddressPoolManager(const AddressPoolManager&) = delete;
  AddressPoolManager& operator=(const AddressPoolManager&) = delete;

#if BUILDFLAG(HAS_64_BIT_POINTERS)
  void Add(pool_handle handle, uintptr_t address, size_t length);
  void Remove(pool_handle handle);

  // Populate a |used| bitset of superpages currently in use.
  void GetPoolUsedSuperPages(pool_handle handle,
                             std::bitset<kMaxSuperPagesInPool>& used);

  // Return the base address of a pool.
  uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

  // Reserves address space from the pool.
  uintptr_t Reserve(pool_handle handle,
                    uintptr_t requested_address,
                    size_t length);

  // Frees address space back to the pool and decommits underlying system
  // pages.
  void UnreserveAndDecommit(pool_handle handle,
                            uintptr_t address,
                            size_t length);
  void ResetForTesting();

#if !BUILDFLAG(HAS_64_BIT_POINTERS)
  void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);

  static bool IsManagedByRegularPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
  }

  static bool IsManagedByBRPPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
  }
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)

  void DumpStats(AddressSpaceStatsDumper* dumper);

 private:
  friend class AddressPoolManagerForTesting;
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  // If we use a thread isolated pool, we need to write-protect its metadata.
  // Allow the function to get access to the pool pointer.
  friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
#endif

  constexpr AddressPoolManager() = default;
  ~AddressPoolManager() = default;

  // Populates `stats` if applicable.
  // Returns whether `stats` was populated. (They might not be, e.g.
  // if PartitionAlloc is wholly unused in this process.)
  bool GetStats(AddressSpaceStats* stats);

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  static void AssertThreadIsolatedLayout();
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)

#if BUILDFLAG(HAS_64_BIT_POINTERS)

  class Pool {
   public:
    constexpr Pool() = default;
    ~Pool() = default;

    Pool(const Pool&) = delete;
    Pool& operator=(const Pool&) = delete;

    void Initialize(uintptr_t ptr, size_t length);
    bool IsInitialized();
    void Reset();

    uintptr_t FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

    bool TryReserveChunk(uintptr_t address, size_t size);

    void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
    uintptr_t GetBaseAddress();

    void GetStats(PoolStats* stats);

   private:
    // The lock needs to be the first field in this class.
    // We write-protect the pool in the ThreadIsolated case, except that the
    // lock can be used without acquiring write-permission first (via
    // DumpStats()). So instead of protecting the whole variable, we only
    // protect the memory after the lock.
    // See the alignment of `alloc_bitset_` below.
    Lock lock_;

    // The bitset stores the allocation state of the address pool. 1 bit per
    // super-page: 1 = allocated, 0 = free.
    std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);

    // An index of a bit in the bitset before which we know for sure all bits
    // are 1s. This is a best-effort hint in the sense that there still may be
    // lots of 1s after this index, but at least we know there is no point in
    // starting the search before it.
    size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;

    size_t total_bits_ = 0;
    uintptr_t address_begin_ = 0;
#if BUILDFLAG(PA_DCHECK_IS_ON)
    uintptr_t address_end_ = 0;
#endif

#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    friend class AddressPoolManager;
    friend void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption);
#endif  // BUILDFLAG(ENABLE_THREAD_ISOLATION)
  };

  PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
    PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
    return &pools_[handle - 1];
  }

  // Gets the stats for the pool identified by `handle`, if
  // initialized.
  void GetPoolStats(pool_handle handle, PoolStats* stats);

  // If thread isolation support is enabled, we need to write-protect the
  // isolated pool (which needs to be last). For this, we need to add padding
  // in front of the pools so that the isolated one starts on a page boundary.
  // We also skip the Lock at the beginning of the pool since it needs to be
  // used in contexts where we didn't enable write access to the pool memory.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-length-array"
#endif
  char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET(
      Pool,
      kNumPools,
      offsetof(Pool, alloc_bitset_))] = {};
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
  Pool pools_[kNumPools];

#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

  static PA_CONSTINIT AddressPoolManager singleton_;
};

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_H_
@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_pool_manager_bitmap.h"

#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)

namespace partition_alloc::internal {

namespace {

Lock g_lock;

}  // namespace

Lock& AddressPoolManagerBitmap::GetLock() {
  return g_lock;
}

std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
    AddressPoolManagerBitmap::regular_pool_bits_;  // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
    AddressPoolManagerBitmap::brp_pool_bits_;  // GUARDED_BY(GetLock())
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
std::array<std::atomic_bool,
           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
    AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

}  // namespace partition_alloc::internal

#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
@ -0,0 +1,189 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_

#include <array>
#include <atomic>
#include <bitset>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_lock.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)

namespace partition_alloc {

namespace internal {

// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
 public:
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;

  // For BRP pool, we use partition page granularity to eliminate the guard
  // pages from the bitmap at the ends:
  // - Eliminating the guard page at the beginning is needed so that pointers
  //   to the end of an allocation that immediately precede a super page in BRP
  //   pool don't accidentally fall into that pool.
  // - Eliminating the guard page at the end is to ensure that the last page
  //   of the address space isn't in the BRP pool. This allows using sentinels
  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
  //   on an invalid address. (Note, 64-bit systems don't have this problem as
  //   the upper half of the address space always belongs to the OS.)
  //
  // Note, direct map allocations also belong to this pool. The same logic as
  // above applies. It is important to note, however, that the granularity used
  // here has to be a minimum of partition page size and direct map allocation
  // granularity. Since DirectMapAllocationGranularity() is no smaller than
  // PageAllocationGranularity(), we don't need to decrease the bitmap
  // granularity any further.
  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
                "");
  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
  static constexpr size_t kBRPPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
|
||||||
|
|
||||||
|
// Regular pool may include both normal bucket and direct map allocations, so
|
||||||
|
// the bitmap granularity has to be at least as small as
|
||||||
|
// DirectMapAllocationGranularity(). No need to eliminate guard pages at the
|
||||||
|
// ends, as this is a BackupRefPtr-specific concern, hence no need to lower
|
||||||
|
// the granularity to partition page size.
|
||||||
|
static constexpr size_t kBitShiftOfRegularPoolBitmap =
|
||||||
|
DirectMapAllocationGranularityShift();
|
||||||
|
static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
|
||||||
|
DirectMapAllocationGranularity();
|
||||||
|
static_assert(kBytesPer1BitOfRegularPoolBitmap ==
|
||||||
|
1 << kBitShiftOfRegularPoolBitmap,
|
||||||
|
"");
|
||||||
|
static constexpr size_t kRegularPoolBits =
|
||||||
|
kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
static bool IsManagedByRegularPool(uintptr_t address) {
|
||||||
|
static_assert(
|
||||||
|
std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
|
||||||
|
regular_pool_bits_.size(),
|
||||||
|
"The bitmap is too small, will result in unchecked out of bounds "
|
||||||
|
"accesses.");
|
||||||
|
// It is safe to read |regular_pool_bits_| without a lock since the caller
|
||||||
|
// is responsible for guaranteeing that the address is inside a valid
|
||||||
|
// allocation and the deallocation call won't race with this call.
|
||||||
|
return PA_TS_UNCHECKED_READ(
|
||||||
|
regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
static bool IsManagedByBRPPool(uintptr_t address) {
|
||||||
|
static_assert(std::numeric_limits<uintptr_t>::max() >>
|
||||||
|
kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
|
||||||
|
"The bitmap is too small, will result in unchecked out of "
|
||||||
|
"bounds accesses.");
|
||||||
|
// It is safe to read |brp_pool_bits_| without a lock since the caller
|
||||||
|
// is responsible for guaranteeing that the address is inside a valid
|
||||||
|
// allocation and the deallocation call won't race with this call.
|
||||||
|
return PA_TS_UNCHECKED_READ(
|
||||||
|
brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
static void BanSuperPageFromBRPPool(uintptr_t address) {
|
||||||
|
brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
|
||||||
|
true, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
|
||||||
|
// The only potentially dangerous scenario, in which this check is used, is
|
||||||
|
// when the assignment of the first raw_ptr<T> object for an address
|
||||||
|
// allocated outside the BRP pool is racing with the allocation of a new
|
||||||
|
// super page at the same address. We assume that if raw_ptr<T> is being
|
||||||
|
// initialized with a raw pointer, the associated allocation is "alive";
|
||||||
|
// otherwise, the issue should be fixed by rewriting the raw pointer
|
||||||
|
// variable as raw_ptr<T>. In the worst case, when such a fix is
|
||||||
|
// impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of
|
||||||
|
// the problematic field. If the above assumption holds, the existing
|
||||||
|
// allocation will prevent us from reserving the super-page region and,
|
||||||
|
// thus, having the race condition. Since we rely on that external
|
||||||
|
// synchronization, the relaxed memory ordering should be sufficient.
|
||||||
|
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
|
||||||
|
std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
|
||||||
|
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
|
||||||
|
private:
|
||||||
|
friend class AddressPoolManager;
|
||||||
|
|
||||||
|
static Lock& GetLock();
|
||||||
|
|
||||||
|
static std::bitset<kRegularPoolBits> regular_pool_bits_
|
||||||
|
PA_GUARDED_BY(GetLock());
|
||||||
|
static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
|
||||||
|
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
|
||||||
|
brp_forbidden_super_page_map_;
|
||||||
|
static std::atomic_size_t blocklist_hit_count_;
|
||||||
|
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
|
||||||
|
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
|
||||||
|
// No need to add IsManagedByConfigurablePool, because Configurable Pool
|
||||||
|
// doesn't exist on 32-bit.
|
||||||
|
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
|
||||||
|
#endif
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
|
||||||
|
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||||
|
|| internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
|
||||||
|
uintptr_t address) {
|
||||||
|
// The Configurable Pool is only available on 64-bit builds.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
|
||||||
|
// The Configurable Pool is only available on 64-bit builds.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_BITMAP_H_
|
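The constants above encode a simple space/precision trade-off: one bit per partition page keeps the BRP bitmap tiny while still resolving any pointer to its page. A standalone sketch of the index math, with illustrative values assumed for the real constants (PartitionPageShift() == 14, kSuperPageShift == 21):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins, not the real constants.
constexpr std::size_t kPartitionPageShift = 14;  // 16 KiB per BRP bitmap bit
constexpr std::size_t kSuperPageShift = 21;      // 2 MiB super pages
constexpr std::uint64_t kAddressSpaceSize = 4ull << 30;  // 32-bit: 4 GiB

int main() {
  // One bit per partition page: 4 GiB / 16 KiB = 262144 bits, so a 32 KiB
  // bitmap covers the entire 32-bit address space.
  constexpr std::uint64_t kBRPPoolBits =
      kAddressSpaceSize >> kPartitionPageShift;
  const std::uintptr_t address = 0x2a08b480u;
  std::printf("BRP bitmap bits: %llu\n",
              static_cast<unsigned long long>(kBRPPoolBits));
  std::printf("address 0x%llx -> BRP bit %llu, super page %llu\n",
              static_cast<unsigned long long>(address),
              static_cast<unsigned long long>(address >> kPartitionPageShift),
              static_cast<unsigned long long>(address >> kSuperPageShift));
  return 0;
}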
@ -0,0 +1,14 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_

namespace partition_alloc::internal {

enum pool_handle : unsigned;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_POOL_MANAGER_TYPES_H_
@ -0,0 +1,51 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_space_randomization.h"

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/random.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

namespace partition_alloc {

uintptr_t GetRandomPageBase() {
  uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());

#if BUILDFLAG(HAS_64_BIT_POINTERS)
  random <<= 32ULL;
  random |= static_cast<uintptr_t>(internal::RandomValue());

  // The ASLRMask() and ASLROffset() constants will be suitable for the
  // OS and build configuration.
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#else  // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_WIN)
  // On win32 host systems the randomization plus huge alignment causes
  // excessive fragmentation. Plus most of these systems lack ASLR, so the
  // randomization isn't buying anything. In that case we just skip it.
  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
  static BOOL is_wow64 = -1;
  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
    is_wow64 = FALSE;
  }
  if (!is_wow64) {
    return 0;
  }
#endif  // BUILDFLAG(IS_WIN)
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

  PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
  return random;
}

}  // namespace partition_alloc
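The mask-then-offset arithmetic is easy to check by hand. A minimal sketch using the Linux x86-64 values from the header below (46-bit mask, zero offset, 4 KiB page granularity assumed), showing that any 64-bit draw lands on a page-aligned hint under 2^46:

#include <cstdint>
#include <cstdio>

// Assumed platform values: 46-bit mask, zero offset, 4 KiB pages.
constexpr std::uintptr_t kPageMask = ~std::uintptr_t{0xfff};
constexpr std::uintptr_t kASLRMask = ((1ull << 46) - 1) & kPageMask;
constexpr std::uintptr_t kASLROffset = 0;

int main() {
  // Pretend this came from two RandomValue() draws.
  std::uintptr_t random = 0x123456789abcdef0ull;
  random &= kASLRMask;    // clamp into [0, 2^46), page-aligned
  random += kASLROffset;  // shift the window if the platform needs it
  std::printf("hint = 0x%llx (page-aligned: %d, below 2^46: %d)\n",
              static_cast<unsigned long long>(random),
              (random & 0xfff) == 0, random < (1ull << 46));
  return 0;
}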
@ -0,0 +1,290 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_

#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"

namespace partition_alloc {

// Calculates a random preferred mapping address. In calculating an address,
// we balance good ASLR against not fragmenting the address space too badly.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();

namespace internal {

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
AslrAddress(uintptr_t mask) {
  return mask & PageAllocationGranularityBaseMask();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
AslrMask(uintptr_t bits) {
  return AslrAddress((1ULL << bits) - 1ULL);
}

// Turn off formatting, because the thicket of nested ifdefs below is
// incomprehensible without indentation. It is also incomprehensible with
// indentation, but the only other option is a combinatorial explosion of
// *_{win,linux,mac,foo}_{32,64}.h files.
//
// clang-format off

#if defined(ARCH_CPU_64_BITS)

  #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

    // We shouldn't allocate system pages at all for sanitizer builds. However,
    // we do, and if random hint addresses interfere with address ranges
    // hard-coded in those tools, bad things happen. This address range is
    // copied from TSAN source but works with all tools. See
    // https://crbug.com/539863.
    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
    ASLRMask() {
      return AslrAddress(0x007fffffffffULL);
    }
    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
    ASLROffset() {
      return AslrAddress(0x7e8000000000ULL);
    }

  #elif BUILDFLAG(IS_WIN)

    // Windows 8.1 and newer support the full 48 bit address range. Since
    // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
    // http://www.alex-ionescu.com/?p=246
    PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
      return AslrMask(47);
    }
    // Try not to map pages into the range where Windows loads DLLs by default.
    PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
      return 0x80000000ULL;
    }

  #elif BUILDFLAG(IS_APPLE)

    // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
    // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
    // is destroyed. Using a virtual address space that is too large causes a
    // leak of about 1 wired [can never be paged out] page per call to mmap.
    // The page is only reclaimed when the process is killed. Confine the hint
    // to a 39-bit section of the virtual address space.
    //
    // This implementation adapted from
    // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The
    // difference is that here we clamp to 39 bits, not 32.
    //
    // TODO(crbug.com/738925): Remove this limitation if/when the macOS
    // behavior changes.
    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
    ASLRMask() {
      return AslrMask(38);
    }
    PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
    ASLROffset() {
      // Be careful, there is a zone where macOS will not map memory, at least
      // on ARM64. From an ARM64 machine running 12.3, the range seems to be
      // [0x1000000000, 0x7000000000). Make sure that the range we use is
      // outside these bounds. In 12.3, there is a reserved area between
      // MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
      // which is reserved on ARM64. See these constants in XNU's source code
      // for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
      return AslrAddress(0x10000000000ULL);
    }

  #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)

    #if defined(ARCH_CPU_X86_64)

      // Linux (and macOS) support the full 47-bit user space of x64
      // processors. Use only 46 to allow the kernel a chance to fulfill the
      // request.
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLRMask() {
        return AslrMask(46);
      }
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLROffset() {
        return AslrAddress(0);
      }

    #elif defined(ARCH_CPU_ARM64)

      #if BUILDFLAG(IS_ANDROID)

      // Restrict the address range on Android to avoid a large performance
      // regression in single-process WebViews. See https://crbug.com/837640.
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLRMask() {
        return AslrMask(30);
      }
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLROffset() {
        return AslrAddress(0x20000000ULL);
      }

      #elif BUILDFLAG(IS_LINUX)

      // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
      // page size and number of levels of translation pages used. We use
      // 39-bit as base as all setups should support this, lowered to 38-bit
      // as ASLROffset() could cause a carry.
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLRMask() {
        return AslrMask(38);
      }
      PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
      ASLROffset() {
        return AslrAddress(0x1000000000ULL);
      }

      #else

      // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
      // could cause a carry.
      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
        return AslrMask(38);
      }
      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
        return AslrAddress(0x1000000000ULL);
      }

      #endif

    #elif defined(ARCH_CPU_PPC64)

      #if BUILDFLAG(IS_AIX)

        // AIX has 64 bits of virtual addressing, but we limit the address
        // range to (a) minimize segment lookaside buffer (SLB) misses; and
        // (b) use extra address space to isolate the mmap regions.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
          return AslrMask(30);
        }
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0x400000000000ULL);
        }

      #elif defined(ARCH_CPU_BIG_ENDIAN)

        // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
          return AslrMask(42);
        }
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0);
        }

      #else  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

        // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
          return AslrMask(46);
        }
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0);
        }

      #endif  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

    #elif defined(ARCH_CPU_S390X)

      // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
      // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
      // chance to fulfill the request.
      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
        return AslrMask(40);
      }
      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
        return AslrAddress(0);
      }

    #elif defined(ARCH_CPU_S390)

      // 31 bits of virtual addressing. Truncate to 29 bits to allow the
      // kernel a chance to fulfill the request.
      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
        return AslrMask(29);
      }
      PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
        return AslrAddress(0);
      }

    #else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
           // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

      // For all other POSIX variants, use 30 bits.
      PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
        return AslrMask(30);
      }

      #if BUILDFLAG(IS_SOLARIS)

        // For our Solaris/illumos mmap hint, we pick a random address in the
        // bottom half of the top half of the address space (that is, the
        // third quarter). Because we do not MAP_FIXED, this will be treated
        // only as a hint -- the system will not fail to mmap because
        // something else happens to already be mapped at our random address.
        // We deliberately set the hint high enough to get well above the
        // system's break (that is, the heap); Solaris and illumos will try
        // the hint and if that fails allocate as if there were no hint at
        // all. The high hint prevents the break from getting hemmed in at low
        // values, ceding half of the address space to the system heap.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0x80000000ULL);
        }

      #elif BUILDFLAG(IS_AIX)

        // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
        // upper range.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0x90000000ULL);
        }

      #else  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)

        // The range 0x20000000 - 0x60000000 is relatively unpopulated across
        // a variety of ASLR modes (PAE kernel, NX compat mode, etc) and on
        // macOS 10.6 and 10.7.
        PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
          return AslrAddress(0x20000000ULL);
        }

      #endif  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)

    #endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
            // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

  #endif  // BUILDFLAG(IS_POSIX)

#elif defined(ARCH_CPU_32_BITS)

  // This is a good range on 32-bit Windows and Android (the only platforms on
  // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region.
  // There is no issue with carries here.
  PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
    return AslrMask(30);
  }
  PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
    return AslrAddress(0x20000000ULL);
  }

#else

  #error Please tell us about your exotic hardware! Sounds interesting.

#endif  // defined(ARCH_CPU_32_BITS)

// clang-format on

}  // namespace internal

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_RANDOMIZATION_H_
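The carry concern behind the 47-bit Windows mask can be checked mechanically: the largest possible hint is mask + offset, and it must stay inside the 48-bit user range. A compile-time sketch, with the values assumed from the header above:

#include <cstdint>

// Assumed from the header above: Windows x64 uses a 47-bit mask and a
// 0x80000000 offset; user space tops out at 2^48.
constexpr std::uint64_t kMask = (1ull << 47) - 1;
constexpr std::uint64_t kOffset = 0x80000000ull;

// Even the maximal draw stays below 2^48, so the add can't carry out of the
// supported user-space range.
static_assert(kMask + kOffset < (1ull << 48), "47-bit mask leaves headroom");

int main() { return 0; }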
@ -0,0 +1,55 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_

#include <cstddef>

#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"

namespace partition_alloc {

// All members are measured in super pages.
struct PoolStats {
  size_t usage = 0;

  // On 32-bit, pools are mainly logical entities, intermingled with
  // allocations not managed by PartitionAlloc. The "largest available
  // reservation" is not possible to measure in that case.
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  size_t largest_available_reservation = 0;
#endif
};

struct AddressSpaceStats {
  PoolStats regular_pool_stats;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  PoolStats brp_pool_stats;
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  PoolStats configurable_pool_stats;
#else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  size_t blocklist_size;  // measured in super pages
  size_t blocklist_hit_count;
#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
  PoolStats thread_isolated_pool_stats;
#endif
};

// Interface passed to `AddressPoolManager::DumpStats()` to mediate
// for `AddressSpaceDumpProvider`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
 public:
  virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
  virtual ~AddressSpaceStatsDumper() = default;
};

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ADDRESS_SPACE_STATS_H_
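AddressSpaceStatsDumper is a plain virtual interface, so a consumer implements it and hands an instance to AddressPoolManager::DumpStats(). A minimal sketch of such a consumer (the printing body is illustrative, not the Chromium dump provider):

#include <cstdio>

// Minimal sketch: a dumper that prints regular-pool usage. Assumes the
// declarations from address_space_stats.h above are visible.
class PrintingStatsDumper final
    : public partition_alloc::AddressSpaceStatsDumper {
 public:
  void DumpStats(
      const partition_alloc::AddressSpaceStats* stats) override {
    // `usage` counts super pages, per the struct comment above.
    std::printf("regular pool usage: %zu super pages\n",
                stats->regular_pool_stats.usage);
  }
};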
@ -0,0 +1,42 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/allocation_guard.h"

#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_config.h"

#if PA_CONFIG(HAS_ALLOCATION_GUARD)

namespace partition_alloc {

namespace {
thread_local bool g_disallow_allocations;
}  // namespace

ScopedDisallowAllocations::ScopedDisallowAllocations() {
  if (g_disallow_allocations) {
    PA_IMMEDIATE_CRASH();
  }

  g_disallow_allocations = true;
}

ScopedDisallowAllocations::~ScopedDisallowAllocations() {
  g_disallow_allocations = false;
}

ScopedAllowAllocations::ScopedAllowAllocations() {
  // Save the previous value, as ScopedAllowAllocations is used in all
  // partitions, not just the malloc() ones.
  saved_value_ = g_disallow_allocations;
  g_disallow_allocations = false;
}

ScopedAllowAllocations::~ScopedAllowAllocations() {
  g_disallow_allocations = saved_value_;
}

}  // namespace partition_alloc

#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)
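The thread-local flag gives these guards simple semantics: entering a second disallow scope on the same thread crashes immediately, while an allow scope restores whatever state it found on exit. A minimal usage sketch (the function and its purpose are illustrative only):

#include "partition_alloc/allocation_guard.h"

// Illustrative only: a section that must not reenter the allocator.
void UpdateAllocatorBookkeeping() {
  // The constructor crashes (PA_IMMEDIATE_CRASH) if this thread is already
  // inside a disallow scope; "Does not nest", per the header.
  partition_alloc::ScopedDisallowAllocations no_alloc;

  {
    // Flip the flag off for a sub-section; the destructor restores the
    // saved value, so the outer disallow scope is intact afterwards.
    partition_alloc::ScopedAllowAllocations allow;
    // ... work that may legitimately allocate ...
  }
}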
@ -0,0 +1,49 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_config.h"

namespace partition_alloc {

#if PA_CONFIG(HAS_ALLOCATION_GUARD)

// Disallow allocations in the scope. Does not nest.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
 public:
  ScopedDisallowAllocations();
  ~ScopedDisallowAllocations();
};

// Allow allocations in the scope, saving and restoring the previous state;
// unlike the disallow guard, this one nests.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
 public:
  ScopedAllowAllocations();
  ~ScopedAllowAllocations();

 private:
  bool saved_value_;
};

#else

struct [[maybe_unused]] ScopedDisallowAllocations {};
struct [[maybe_unused]] ScopedAllowAllocations {};

#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)

}  // namespace partition_alloc

namespace base::internal {

using ::partition_alloc::ScopedAllowAllocations;
using ::partition_alloc::ScopedDisallowAllocations;

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ALLOCATION_GUARD_H_
@ -0,0 +1,50 @@
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
#    Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.

.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end
arm_bti_test_function:
  # Mark the start of this function as a valid call target.
  bti jc
  add x0, x0, #1
arm_bti_test_function_invalid_offset:
  # This label simulates calling an incomplete function.
  # Jumping here should crash systems which support BTI.
  add x0, x0, #2
  ret
arm_bti_test_function_end:
  nop

// For details see section "6.2 Program Property" in
// "ELF for the Arm 64-bit Architecture (AArch64)"
// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property
.pushsection .note.gnu.property, "a";
  .balign 8;
  .long 4;
  .long 0x10;
  .long 0x5;
  .asciz "GNU";
  .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
  .long 4;
  .long 1;  /* GNU_PROPERTY_AARCH64_BTI */;
  .long 0;
.popsection

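The five-step procedure described in the comment above maps to a few syscalls on arm64 Linux. A minimal sketch of that harness, assuming a 4096-byte page and a PROT_BTI value of 0x10 if the headers lack it (error handling and the instruction-cache flush a real test also needs are elided):

#include <sys/mman.h>

#include <cstdint>
#include <cstring>

#include "partition_alloc/arm_bti_test_functions.h"

#ifndef PROT_BTI
#define PROT_BTI 0x10  // aarch64 Linux value; assumed if headers lack it
#endif

// Sketch of steps 1-5 from the comment above.
int64_t RunBtiProbe() {
  const size_t kPage = 4096;  // assumed page size
  void* page = mmap(nullptr, kPage, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);        // step 1
  const auto begin = reinterpret_cast<uintptr_t>(&arm_bti_test_function);
  const auto end = reinterpret_cast<uintptr_t>(&arm_bti_test_function_end);
  std::memcpy(page, reinterpret_cast<const void*>(begin),
              static_cast<size_t>(end - begin));                // step 2
  mprotect(page, kPage, PROT_READ | PROT_EXEC | PROT_BTI);      // step 3
  auto fn = reinterpret_cast<int64_t (*)(int64_t)>(page);
  return fn(0);  // step 4: expect 3 (1 + 2). Calling at the offset of
                 // arm_bti_test_function_invalid_offset instead (step 5)
                 // should fault on BTI-capable hardware.
}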
@ -0,0 +1,31 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_

#include <cstdint>

#include "build/build_config.h"

#if defined(ARCH_CPU_ARM64)
extern "C" {
/**
 * A valid BTI function. Jumping to this function should not cause any problem
 * in a BTI enabled environment.
 **/
int64_t arm_bti_test_function(int64_t);

/**
 * A function without proper BTI landing pad. Jumping here should crash the
 * program on systems which support BTI.
 **/
int64_t arm_bti_test_function_invalid_offset(int64_t);

/**
 * A simple function which immediately returns to sender.
 **/
void arm_bti_test_function_end(void);
}
#endif  // defined(ARCH_CPU_ARM64)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ARM_BTI_TEST_FUNCTIONS_H_
@ -0,0 +1,29 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/compressed_pointer.h"

#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)

namespace partition_alloc::internal {

// We keep the useful part in |g_base_| as 1s to speed up decompression.
alignas(kPartitionCachelineSize)
    PA_COMPONENT_EXPORT(PARTITION_ALLOC) CompressedPointerBaseGlobal::Base
        CompressedPointerBaseGlobal::g_base_ = {.base = kUsefulBitsMask};

void CompressedPointerBaseGlobal::SetBase(uintptr_t base) {
  PA_DCHECK(!IsSet());
  PA_DCHECK((base & kUsefulBitsMask) == 0);
  g_base_.base = base | kUsefulBitsMask;
}

void CompressedPointerBaseGlobal::ResetBaseForTesting() {
  g_base_.base = kUsefulBitsMask;
}

}  // namespace partition_alloc::internal

#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
@ -0,0 +1,668 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_

#include <bit>
#include <climits>
#include <type_traits>

#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)

#if !BUILDFLAG(GLUE_CORE_POOLS)
#error "Pointer compression only works with glued pools"
#endif
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error "Pointer compression currently supports constant pool size"
#endif

#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)

namespace partition_alloc {

namespace internal {

template <typename T1, typename T2>
constexpr bool IsDecayedSame =
    std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;

#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)

// Pointer compression works by storing only the 'useful' 32-bit part of the
// pointer. The other half (the base) is stored in a global variable
// (CompressedPointerBaseGlobal::g_base_), which is used on decompression. To
// support fast branchless decompression of nullptr, we use the most
// significant bit in the compressed pointer to leverage sign-extension (for
// non-nullptr pointers, the most significant bit is set, whereas for nullptr
// it's not). Using this bit and supporting heaps larger than 4GB relies on
// having alignment bits in pointers. Assuming that all pointers point to at
// least 8-byte alignment objects, pointer compression can support heaps of
// size <= 16GB.
// ((3 alignment bits) = (1 bit for sign-extension) + (2 bits for 16GB heap)).
//
// Example: heap base: 0x4b0'00000000
// - g_base: 0x4b3'ffffffff (lower 34 bits set)
// - normal pointer: 0x4b2'a08b6480
//   - compression:
//     - shift right by 3:        0x96'54116c90
//     - truncate:                    0x54116c90
//     - mark MSB:                    0xd4116c90
//   - decompression:
//     - sign-extend:       0xffffffff'd4116c90
//     - shift left by 3:   0xfffffffe'a08b6480
//     - 'and' with g_base: 0x000004b2'a08b6480
//
// - nullptr: 0x00000000'00000000
//   - compression:
//     - shift right by 3:  0x00000000'00000000
//     - truncate:                    0x00000000
//     - (don't mark MSB for nullptr)
//   - decompression:
//     - sign-extend:       0x00000000'00000000
//     - shift left by 3:   0x00000000'00000000
//     - 'and' with g_base: 0x00000000'00000000
//
// Pointer compression relies on having both the regular and the BRP pool
// (core pools) 'glued', so that the same base could be used for both. For
// simplicity, the configurations with dynamically selected pool size are not
// supported. However, they can be at the cost of performing an extra load for
// core-pools-shift-size on both compression and decompression.
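// The worked example above can be reproduced with plain integer arithmetic.
// A standalone sketch, with the constants assumed from the comment (34
// useful bits for 16 GiB core pools, 3-bit shift, base 0x4b0'00000000):
//
// #include <cstdint>
// #include <cstdio>
//
// constexpr std::uint64_t kBase = 0x4b0'00000000ull;
// constexpr std::uint64_t kUsefulBitsMask = (1ull << 34) - 1;
// constexpr std::uint64_t kGBase = kBase | kUsefulBitsMask;  // 0x4b3'ffffffff
// constexpr unsigned kShift = 3;
//
// std::uint32_t Compress(std::uint64_t ptr) {
//   auto c = static_cast<std::uint32_t>(ptr >> kShift);  // shift + truncate
//   if (c)
//     c |= 1u << 31;  // mark MSB so decompression sign-extends
//   return c;
// }
//
// std::uint64_t Decompress(std::uint32_t v) {
//   // Sign-extend, shift back as unsigned, AND with the all-ones-tail base.
//   const std::uint64_t mask =
//       static_cast<std::uint64_t>(static_cast<std::int32_t>(v)) << kShift;
//   return mask & kGBase;
// }
//
// int main() {
//   const std::uint64_t ptr = 0x4b2'a08b6480ull;
//   std::printf("compressed:   0x%x\n",
//               static_cast<unsigned>(Compress(ptr)));   // 0xd4116c90
//   std::printf("decompressed: 0x%llx\n",                // 0x4b2a08b6480
//               static_cast<unsigned long long>(Decompress(Compress(ptr))));
//   std::printf("nullptr roundtrip: 0x%llx\n",           // 0x0
//               static_cast<unsigned long long>(Decompress(Compress(0))));
//   return 0;
// }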
|
||||||
|
class CompressedPointerBaseGlobal final {
|
||||||
|
public:
|
||||||
|
static constexpr size_t kUsefulBits =
|
||||||
|
std::countr_zero(PartitionAddressSpace::CorePoolsSize());
|
||||||
|
static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
|
||||||
|
static constexpr size_t kBitsToShift =
|
||||||
|
kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
|
||||||
|
|
||||||
|
CompressedPointerBaseGlobal() = delete;
|
||||||
|
|
||||||
|
// Attribute const allows the compiler to assume that
|
||||||
|
// CompressedPointerBaseGlobal::g_base_ doesn't change (e.g. across calls) and
|
||||||
|
// thereby avoid redundant loads.
|
||||||
|
PA_ALWAYS_INLINE __attribute__((const)) static uintptr_t Get() {
|
||||||
|
PA_DCHECK(IsBaseConsistent());
|
||||||
|
return g_base_.base;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE static bool IsSet() {
|
||||||
|
PA_DCHECK(IsBaseConsistent());
|
||||||
|
return (g_base_.base & ~kUsefulBitsMask) != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static constexpr uintptr_t kUsefulBitsMask =
|
||||||
|
PartitionAddressSpace::CorePoolsSize() - 1;
|
||||||
|
|
||||||
|
static union alignas(kPartitionCachelineSize)
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
|
||||||
|
uintptr_t base;
|
||||||
|
char cache_line[kPartitionCachelineSize];
|
||||||
|
} g_base_ PA_CONSTINIT;
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE static bool IsBaseConsistent() {
|
||||||
|
return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void SetBase(uintptr_t base);
|
||||||
|
static void ResetBaseForTesting();
|
||||||
|
|
||||||
|
friend class PartitionAddressSpace;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
#if BUILDFLAG(ENABLE_POINTER_COMPRESSION)
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
class PA_TRIVIAL_ABI CompressedPointer final {
|
||||||
|
public:
|
||||||
|
using UnderlyingType = uint32_t;
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer() = default;
|
||||||
|
PA_ALWAYS_INLINE explicit CompressedPointer(T* ptr) : value_(Compress(ptr)) {}
|
||||||
|
PA_ALWAYS_INLINE constexpr explicit CompressedPointer(std::nullptr_t)
|
||||||
|
: value_(0u) {}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer(const CompressedPointer&) =
|
||||||
|
default;
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer(
|
||||||
|
CompressedPointer&& other) noexcept = default;
|
||||||
|
|
||||||
|
template <typename U,
|
||||||
|
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer(
|
||||||
|
const CompressedPointer<U>& other) {
|
||||||
|
if constexpr (internal::IsDecayedSame<T, U>) {
|
||||||
|
// When pointers have the same type modulo constness, avoid the
|
||||||
|
// compress-decompress round.
|
||||||
|
value_ = other.value_;
|
||||||
|
} else {
|
||||||
|
// When the types are different, perform the round, because the pointer
|
||||||
|
// may need to be adjusted.
|
||||||
|
// TODO(1376980): Avoid the cycle here.
|
||||||
|
value_ = Compress(other.get());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename U,
|
||||||
|
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer(
|
||||||
|
CompressedPointer<U>&& other) noexcept
|
||||||
|
: CompressedPointer(other) {}
|
||||||
|
|
||||||
|
~CompressedPointer() = default;
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
|
||||||
|
const CompressedPointer&) = default;
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
|
||||||
|
CompressedPointer&& other) noexcept = default;
|
||||||
|
|
||||||
|
template <typename U,
|
||||||
|
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
|
||||||
|
const CompressedPointer<U>& other) {
|
||||||
|
CompressedPointer copy(other);
|
||||||
|
value_ = copy.value_;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename U,
|
||||||
|
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(
|
||||||
|
CompressedPointer<U>&& other) noexcept {
|
||||||
|
*this = other;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't perform compression when assigning to nullptr.
|
||||||
|
PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(std::nullptr_t) {
|
||||||
|
value_ = 0u;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE T* get() const { return Decompress(value_); }
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return value_; }
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr UnderlyingType GetAsIntegral() const {
|
||||||
|
return value_;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr explicit operator bool() const {
|
||||||
|
return is_nonnull();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename U = T,
|
||||||
|
std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
|
||||||
|
PA_ALWAYS_INLINE U& operator*() const {
|
||||||
|
PA_DCHECK(is_nonnull());
|
||||||
|
return *get();
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE T* operator->() const {
|
||||||
|
PA_DCHECK(is_nonnull());
|
||||||
|
return get();
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& other) {
|
||||||
|
std::swap(value_, other.value_);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
template <typename>
|
||||||
|
friend class CompressedPointer;
|
||||||
|
|
||||||
|
static constexpr size_t kBitsForSignExtension = 1;
|
||||||
|
static constexpr size_t kOverallBitsToShift =
|
||||||
|
internal::CompressedPointerBaseGlobal::kBitsToShift +
|
||||||
|
kBitsForSignExtension;
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) {
|
||||||
|
static constexpr size_t kMinimalRequiredAlignment = 8;
|
||||||
|
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
|
||||||
|
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
PA_DCHECK(reinterpret_cast<uintptr_t>(ptr) % kMinimalRequiredAlignment ==
|
||||||
|
0);
|
||||||
|
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
|
||||||
|
|
||||||
|
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
|
||||||
|
static constexpr size_t kCorePoolsBaseMask =
|
||||||
|
~(internal::PartitionAddressSpace::CorePoolsSize() - 1);
|
||||||
|
PA_DCHECK(!ptr ||
|
||||||
|
(base & kCorePoolsBaseMask) ==
|
||||||
|
(reinterpret_cast<uintptr_t>(ptr) & kCorePoolsBaseMask));
|
||||||
|
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
|
||||||
|
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
|
||||||
|
// Shift the pointer and truncate.
|
||||||
|
auto compressed = static_cast<UnderlyingType>(uptr >> kOverallBitsToShift);
|
||||||
|
// If the pointer is non-null, mark the most-significant-bit to sign-extend
|
||||||
|
// it on decompression. Assuming compression is a significantly less
|
||||||
|
// frequent operation, we let more work here in favor of faster
|
||||||
|
// decompression.
|
||||||
|
// TODO(1376980): Avoid this by overreserving the heap.
|
||||||
|
if (compressed) {
|
||||||
|
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
|
||||||
|
}
|
||||||
|
|
||||||
|
return compressed;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) {
|
||||||
|
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
|
||||||
|
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
|
||||||
|
// Treat compressed pointer as signed and cast it to uint64_t, which will
|
||||||
|
// sign-extend it. Then, shift the result by one. It's important to shift
|
||||||
|
// the already unsigned value, as otherwise it would result in undefined
|
||||||
|
// behavior.
|
||||||
|
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
|
||||||
|
<< (kOverallBitsToShift);
|
||||||
|
return reinterpret_cast<T*>(mask & base);
|
||||||
|
}
|
||||||
|
|
||||||
|
UnderlyingType value_;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
PA_ALWAYS_INLINE constexpr void swap(CompressedPointer<T>& a,
|
||||||
|
CompressedPointer<T>& b) {
|
||||||
|
a.swap(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
// operators==.
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE bool operator==(CompressedPointer<T> a,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
if constexpr (internal::IsDecayedSame<T, U>) {
|
||||||
|
// When pointers have the same type modulo constness, simply compare
|
||||||
|
// compressed values.
|
||||||
|
return a.GetAsIntegral() == b.GetAsIntegral();
|
||||||
|
} else {
|
||||||
|
// When the types are different, compare decompressed pointers, because the
|
||||||
|
// pointers may need to be adjusted.
|
||||||
|
// TODO(1376980): Avoid decompression here.
|
||||||
|
return a.get() == b.get();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a, U* b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return a == static_cast<CompressedPointer<U>>(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator==(T* a, CompressedPointer<U> b) {
|
||||||
|
return b == a;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer<T> a,
|
||||||
|
std::nullptr_t) {
|
||||||
|
return !a.is_nonnull();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
return b == nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
// operators!=.
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
return !(a == b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a, U* b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return a != static_cast<CompressedPointer<U>>(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator!=(T* a, CompressedPointer<U> b) {
|
||||||
|
return b != a;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer<T> a,
|
||||||
|
std::nullptr_t) {
|
||||||
|
return a.is_nonnull();
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
return b != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
// operators<.
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
if constexpr (internal::IsDecayedSame<T, U>) {
|
||||||
|
// When pointers have the same type modulo constness, simply compare
|
||||||
|
// compressed values.
|
||||||
|
return a.GetAsIntegral() < b.GetAsIntegral();
|
||||||
|
} else {
|
||||||
|
// When the types are different, compare decompressed pointers, because the
|
||||||
|
// pointers may need to be adjusted.
|
||||||
|
// TODO(1376980): Avoid decompression here.
|
||||||
|
return a.get() < b.get();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer<T> a, U* b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return a < static_cast<CompressedPointer<U>>(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<(T* a, CompressedPointer<U> b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return static_cast<CompressedPointer<T>>(a) < b;
|
||||||
|
}
|
||||||
|
|
||||||
|
// operators<=.
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
if constexpr (internal::IsDecayedSame<T, U>) {
|
||||||
|
// When pointers have the same type modulo constness, simply compare
|
||||||
|
// compressed values.
|
||||||
|
return a.GetAsIntegral() <= b.GetAsIntegral();
|
||||||
|
} else {
|
||||||
|
// When the types are different, compare decompressed pointers, because the
|
||||||
|
// pointers may need to be adjusted.
|
||||||
|
// TODO(1376980): Avoid decompression here.
|
||||||
|
return a.get() <= b.get();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer<T> a, U* b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return a <= static_cast<CompressedPointer<U>>(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator<=(T* a, CompressedPointer<U> b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return static_cast<CompressedPointer<T>>(a) <= b;
|
||||||
|
}
|
||||||
|
|
||||||
|
// operators>.
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a,
|
||||||
|
CompressedPointer<U> b) {
|
||||||
|
return !(a <= b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer<T> a, U* b) {
|
||||||
|
// Do compression, since it is less expensive.
|
||||||
|
return a > static_cast<CompressedPointer<U>>(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename U>
|
||||||
|
PA_ALWAYS_INLINE constexpr bool operator>(T* a, CompressedPointer<U> b) {
  // Do compression, since it is less expensive.
  return static_cast<CompressedPointer<T>>(a) > b;
}

// operators>=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a,
                                           CompressedPointer<U> b) {
  return !(a < b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer<T> a, U* b) {
  // Do compression, since it is less expensive.
  return a >= static_cast<CompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer<U> b) {
  // Do compression, since it is less expensive.
  return static_cast<CompressedPointer<T>>(a) >= b;
}

#endif  // BUILDFLAG(ENABLE_POINTER_COMPRESSION)

// Simple wrapper over the raw pointer.
template <typename T>
class PA_TRIVIAL_ABI UncompressedPointer final {
 public:
  PA_ALWAYS_INLINE constexpr UncompressedPointer() = default;
  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(T* ptr) : ptr_(ptr) {}
  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(std::nullptr_t)
      : ptr_(nullptr) {}

  PA_ALWAYS_INLINE constexpr UncompressedPointer(const UncompressedPointer&) =
      default;
  PA_ALWAYS_INLINE constexpr UncompressedPointer(
      UncompressedPointer&& other) noexcept = default;

  template <typename U,
            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
      const UncompressedPointer<U>& other)
      : ptr_(other.ptr_) {}

  template <typename U,
            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
  PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
      UncompressedPointer<U>&& other) noexcept
      : ptr_(std::move(other.ptr_)) {}

  ~UncompressedPointer() = default;

  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
      const UncompressedPointer&) = default;
  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
      UncompressedPointer&& other) noexcept = default;

  template <typename U,
            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
      const UncompressedPointer<U>& other) {
    ptr_ = other.ptr_;
    return *this;
  }

  template <typename U,
            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(
      UncompressedPointer<U>&& other) noexcept {
    ptr_ = std::move(other.ptr_);
    return *this;
  }

  PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(std::nullptr_t) {
    ptr_ = nullptr;
    return *this;
  }

  PA_ALWAYS_INLINE constexpr T* get() const { return ptr_; }

  PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return ptr_; }

  PA_ALWAYS_INLINE constexpr explicit operator bool() const {
    return is_nonnull();
  }

  template <typename U = T,
            std::enable_if_t<!std::is_void_v<std::remove_cv_t<U>>>* = nullptr>
  PA_ALWAYS_INLINE constexpr U& operator*() const {
    PA_DCHECK(is_nonnull());
    return *get();
  }

  PA_ALWAYS_INLINE constexpr T* operator->() const {
    PA_DCHECK(is_nonnull());
    return get();
  }

  PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& other) {
    std::swap(ptr_, other.ptr_);
  }

 private:
  template <typename>
  friend class UncompressedPointer;

  T* ptr_;
};
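// Example usage (an illustrative sketch only; `Foo` is a hypothetical type,
// not part of the file):
//
//   struct Foo { int value = 42; };
//   Foo foo;
//   UncompressedPointer<Foo> ptr(&foo);
//   PA_DCHECK(ptr);                // explicit operator bool
//   PA_DCHECK(ptr->value == 42);   // operator-> forwards to the raw pointer
//   ptr = nullptr;
//   PA_DCHECK(ptr == nullptr);     // nullptr comparison uses is_nonnull()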
template <typename T>
PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer<T>& a,
                                     UncompressedPointer<T>& b) {
  a.swap(b);
}

// operators==.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
                                           UncompressedPointer<U> b) {
  return a.get() == b.get();
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a, U* b) {
  return a == static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(T* a, UncompressedPointer<U> b) {
  return b == a;
}

template <typename T>
PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer<T> a,
                                           std::nullptr_t) {
  return !a.is_nonnull();
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t,
                                           UncompressedPointer<U> b) {
  return b == nullptr;
}

// operators!=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
                                           UncompressedPointer<U> b) {
  return !(a == b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a, U* b) {
  return a != static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(T* a, UncompressedPointer<U> b) {
  return b != a;
}

template <typename T>
PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer<T> a,
                                           std::nullptr_t) {
  return a.is_nonnull();
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t,
                                           UncompressedPointer<U> b) {
  return b != nullptr;
}

// operators<.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a,
                                          UncompressedPointer<U> b) {
  return a.get() < b.get();
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer<T> a, U* b) {
  return a < static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<(T* a, UncompressedPointer<U> b) {
  return static_cast<UncompressedPointer<T>>(a) < b;
}

// operators<=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a,
                                           UncompressedPointer<U> b) {
  return a.get() <= b.get();
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer<T> a, U* b) {
  return a <= static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator<=(T* a, UncompressedPointer<U> b) {
  return static_cast<UncompressedPointer<T>>(a) <= b;
}

// operators>.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a,
                                          UncompressedPointer<U> b) {
  return !(a <= b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer<T> a, U* b) {
  return a > static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>(T* a, UncompressedPointer<U> b) {
  return static_cast<UncompressedPointer<T>>(a) > b;
}

// operators>=.
template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a,
                                           UncompressedPointer<U> b) {
  return !(a < b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer<T> a, U* b) {
  return a >= static_cast<UncompressedPointer<U>>(b);
}

template <typename T, typename U>
PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer<U> b) {
  return static_cast<UncompressedPointer<T>>(a) >= b;
}

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_COMPRESSED_POINTER_H_
@ -0,0 +1,75 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/dangling_raw_ptr_checks.h"

#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_check.h"

namespace partition_alloc {

namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
    [](uintptr_t) {};
bool g_unretained_dangling_raw_ptr_check_enabled = false;
}  // namespace

DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
  PA_DCHECK(g_dangling_raw_ptr_detected_fn);
  return g_dangling_raw_ptr_detected_fn;
}

DanglingRawPtrDetectedFn* GetDanglingRawPtrReleasedFn() {
  PA_DCHECK(g_dangling_raw_ptr_released_fn);
  return g_dangling_raw_ptr_released_fn;
}

void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
  PA_DCHECK(fn);
  g_dangling_raw_ptr_detected_fn = fn;
}

void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
  PA_DCHECK(fn);
  g_dangling_raw_ptr_released_fn = fn;
}

DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
  return g_unretained_dangling_raw_ptr_detected_fn;
}

void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
  PA_DCHECK(fn);
  g_unretained_dangling_raw_ptr_detected_fn = fn;
}

bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
  bool old = g_unretained_dangling_raw_ptr_check_enabled;
  g_unretained_dangling_raw_ptr_check_enabled = enabled;
  return old;
}

namespace internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
  g_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
  g_dangling_raw_ptr_released_fn(id);
}

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id) {
  g_unretained_dangling_raw_ptr_detected_fn(id);
}

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled() {
  return g_unretained_dangling_raw_ptr_check_enabled;
}

}  // namespace internal
}  // namespace partition_alloc
@ -0,0 +1,67 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_

#include <cstdint>

#include "partition_alloc/partition_alloc_base/component_export.h"

// When compiled with the build flag `enable_dangling_raw_ptr_checks`, dangling
// `raw_ptr`s are reported. The behavior can be configured here.
//
// Purpose of this level of indirection:
// - Ease testing.
// - Keep partition_alloc/ independent from base/. In most cases, when a
//   dangling raw_ptr is detected/released, this involves recording a
//   base::debug::StackTrace, which isn't desirable inside partition_alloc/.
// - Be able (potentially) to turn this feature on/off at runtime based on
//   the dependent's flags.
namespace partition_alloc {

// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
// memory region and the allocator is asked to release it.
//
// It won't be called again with the same `id`, up until (potentially) a call to
// DanglingRawPtrReleased(`id`) is made.
//
// This function is called from within the allocator, and is not allowed to
// allocate memory.
using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
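// Example of installing a custom detection hook (an illustrative sketch;
// `MyDanglingPtrHandler` is a hypothetical function, not part of the API):
//
//   void MyDanglingPtrHandler(uintptr_t id) {
//     // Record |id| so it can be matched with a later "released" report.
//   }
//
//   partition_alloc::SetDanglingRawPtrDetectedFn(&MyDanglingPtrHandler);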
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);

// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region.
//
// This function is allowed to allocate memory.
using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);

namespace internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled();

}  // namespace internal
}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_DANGLING_RAW_PTR_CHECKS_H_
@ -0,0 +1,33 @@
digraph {
  node[shape=box]
  edge[dir=both]
  compound = true
  dpi = 192
  nodesep = 0.91
  // Allows aligning nodes in different subgraphs.
  newrank = true

  subgraph cluster_0 {
    label = "Address Space"
    reg[label="Regular Pool"]
    brp[label="BRP Pool"]
    add[label="Additional Pools"]
    reg->brp->add[style=invis]
  }

  manager[label="AddressPoolManager"]
  manager->reg[constraint=false]
  manager->brp
  manager->add[constraint=false]

  subgraph cluster_1 {
    label = "PartitionRoots"
    pae[label="PA-E Root"]
    blink[label="Blink Roots"]
    etc[style=dotted, label="Other Roots"]
    pae->blink->etc[style=invis]
  }

  manager->blink[lhead=cluster_1]
  {rank=same manager brp blink}
}
Binary file not shown.
After Width: | Height: | Size: 41 KiB
@ -0,0 +1,59 @@
digraph {
  node[shape=plaintext]
  edge[style=dashed, color=crimson]

  page1[label=<
    <table border="0" cellborder="1" cellspacing="0"><tr>
      <!-- head partition page -->
      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="160"></td>
      <!-- a slot span in this bucket -->
      <td port="slotspan" bgcolor="crimson" width="80"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="320"></td>
      <!-- tail partition page -->
      <td bgcolor="darkgrey" width="40"></td>
    </tr></table>
  >]
  page2[label=<
    <table border="0" cellborder="1" cellspacing="0"><tr>
      <!-- head partition page -->
      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="280"></td>
      <!-- a slot span in this bucket -->
      <td port="slotspan" bgcolor="crimson" width="80"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="200"></td>
      <!-- tail partition page -->
      <td bgcolor="darkgrey" width="40"></td>
    </tr></table>
  >]
  page3[label=<
    <table border="0" cellborder="1" cellspacing="0"><tr>
      <!-- head partition page -->
      <td port="head" bgcolor="darkgrey" width="40" height="52"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="40"></td>
      <!-- a slot span in this bucket -->
      <td port="slotspan1" bgcolor="crimson" width="80"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="120"></td>
      <!-- a slot span in this bucket -->
      <td port="slotspan2" bgcolor="crimson" width="80"></td>
      <!-- bucket-external memory - not depicted -->
      <td width="240"></td>
      <!-- tail partition page -->
      <td bgcolor="darkgrey" width="40"></td>
    </tr></table>
  >]

  // Invisibly link the head partition pages to force alignment.
  page1:head->page2:head->page3:head[style=invis]

  // Inter-super-page links disable constraints so as to let the above
  // fully control alignment.
  page1:slotspan->page2:slotspan->page3:slotspan1[constraint=false]
  page3:slotspan1:s->page3:slotspan2:sw
}
Binary file not shown.
After Width: | Height: | Size: 10 KiB
@ -0,0 +1,22 @@
digraph G {
  node[shape=box,style="filled,rounded",color=deepskyblue]

  subgraph cluster_tc {
    label = "Thread Cache"
    rankdir = LR
    {rank=same;TLS1,TLS2,TLSn}
    TLS1->TLS2[style=invisible,dir=none]
    TLS2->TLSn[style=dotted,dir=none]
  }

  subgraph cluster_central {
    label = "Central Allocator (per-partition lock)"
    fast[label="slot span freelists (fast path)"]
    slow[label="slot span management (slow path)"]
    # Forces slow path node beneath fast path node.
    fast->slow[style=invisible,dir=none]
  }

  # Forces thread-external subgraph beneath thread cache subgraph.
  TLS2->fast[style=invisible,dir=none]
}
Binary file not shown.
After Width: | Height: | Size: 15 KiB
@ -0,0 +1,94 @@
digraph G {
  node[shape=plaintext]
  edge[style=dashed]

  invisible_a[label=<
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
      <TR>
        <TD PORT="red" WIDTH="100"></TD>
        <TD PORT="green" WIDTH="20"></TD>
        <TD PORT="blue" WIDTH="40"></TD>
        <TD PORT="gold" WIDTH="300"></TD>
        <TD PORT="pink" WIDTH="60"></TD>
      </TR>
    </TABLE>
  >]
  superpage[xlabel="Super Page",label=<
    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
      <TR>
        <!-- Head Partition Page -->
        <TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
        <TD PORT="metadata"></TD>
        <TD BGCOLOR="darkgrey" WIDTH="18"></TD>
        <!-- Bitmaps -->
        <TD WIDTH="100">Bitmaps(?)</TD>
        <!-- Several Slot Spans -->
        <TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
        <TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
        <TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
        <TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
        <TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
        <TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
        <TD WIDTH="79">...</TD>
        <!-- Tail Partition Page -->
        <TD BGCOLOR="darkgrey" WIDTH="39"></TD>
      </TR>
    </TABLE>
  >]
  invisible_b[label=<
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
      <TR>
        <TD PORT="green" WIDTH="30"></TD>
        <TD PORT="blue" WIDTH="60"></TD>
        <TD PORT="gold" WIDTH="180"></TD>
        <TD PORT="red" WIDTH="90"></TD>
        <TD PORT="pink" WIDTH="90"></TD>
      </TR>
    </TABLE>
  >]
  metadata_page[xlabel="Metadata",label=<
    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
      <TR>
        <!-- Guard Page Metadata -->
        <TD BGCOLOR="darkgrey"> </TD>
        <!-- Bitmaps Offset -->
        <TD> B? </TD>
        <!-- Red Slot Span Metadata -->
        <TD BGCOLOR="crimson">v</TD>
        <TD BGCOLOR="crimson">+</TD>
        <TD BGCOLOR="crimson">+</TD>
        <!-- Green Slot Span Metadata -->
        <TD BGCOLOR="palegreen">v</TD>
        <!-- Blue Slot Span Metadata -->
        <TD BGCOLOR="cornflowerblue">v</TD>
        <TD BGCOLOR="cornflowerblue">+</TD>
        <!-- Gold Slot Span Metadata -->
        <TD BGCOLOR="gold">v</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <!-- Red Slot Span Metadata -->
        <TD BGCOLOR="crimson">v</TD>
        <TD BGCOLOR="crimson">+</TD>
        <TD BGCOLOR="crimson">+</TD>
        <!-- Pink Slot Span Metadata -->
        <TD BGCOLOR="deeppink">v</TD>
        <!-- etc. -->
        <TD WIDTH="64">...</TD>
        <!-- Guard Page Metadata -->
        <TD BGCOLOR="darkgrey"> </TD>
      </TR>
    </TABLE>
  >]

  invisible_a:red->superpage:red->superpage:red2[color=crimson]
  superpage:red2->invisible_b:red[color=crimson]
  invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
  invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
  invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
  invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]

  superpage:metadata->metadata_page[style="",arrowhead=odot]
}
Binary file not shown.
After Width: | Height: | Size: 26 KiB
@ -0,0 +1,320 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_

#include <cstddef>
#include <cstdint>

#include "build/build_config.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"

#if !defined(ARCH_CPU_BIG_ENDIAN)
#include "partition_alloc/reverse_bytes.h"
#endif  // !defined(ARCH_CPU_BIG_ENDIAN)

namespace partition_alloc::internal {

class EncodedNextFreelistEntry;

class EncodedFreelistPtr {
 private:
  PA_ALWAYS_INLINE constexpr explicit EncodedFreelistPtr(std::nullptr_t)
      : encoded_(Transform(0)) {}
  PA_ALWAYS_INLINE explicit EncodedFreelistPtr(void* ptr)
      // The encoded pointer stays MTE-tagged.
      : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}

  PA_ALWAYS_INLINE EncodedNextFreelistEntry* Decode() const {
    return reinterpret_cast<EncodedNextFreelistEntry*>(Transform(encoded_));
  }

  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }

  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
    encoded_ = encoded;
  }

  PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }

  // Transform() works the same in both directions, so can be used for
  // encoding and decoding.
  PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
    // We use bswap on little endian as a fast transformation for two reasons:
    // 1) On 64 bit architectures, the pointer is very unlikely to be a
    //    canonical address. Therefore, if an object is freed and its vtable is
    //    used where the attacker doesn't get the chance to run allocations
    //    between the free and use, the vtable dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    //    corrupt a freelist pointer, partial pointer overwrite attacks are
    //    thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t transformed = ~address;
#else
    uintptr_t transformed = ReverseBytes(address);
#endif
    return transformed;
  }
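  // Illustrative example (values assume a hypothetical 64-bit little-endian
  // build): a plausible heap address such as 0x0000'7fff'1234'5678 byte-swaps
  // to 0x7856'3412'ff7f'0000, a non-canonical address, so dereferencing the
  // encoded value without decoding it first is likely to fault.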
  uintptr_t encoded_;

  friend EncodedNextFreelistEntry;
};

// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md
// and |Transform()| for the rationale and mechanism, respectively.
class EncodedNextFreelistEntry {
 private:
  constexpr explicit EncodedNextFreelistEntry(std::nullptr_t)
      : encoded_next_(EncodedFreelistPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  explicit EncodedNextFreelistEntry(EncodedNextFreelistEntry* next)
      : encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // For testing only.
  EncodedNextFreelistEntry(void* next, bool make_shadow_match)
      : encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
#endif
  {
  }

 public:
  ~EncodedNextFreelistEntry() = delete;

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it as null-terminated.
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
      void* slot_start_tagged) {
    // |slot_start_tagged| is MTE-tagged.
    auto* entry = new (slot_start_tagged) EncodedNextFreelistEntry(nullptr);
    return entry;
  }
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
      uintptr_t slot_start) {
    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer, but encoded.
  //
  // This freelist is built for the purpose of thread-cache. This means that we
  // can't perform a check that this and the next pointer belong to the same
  // super page, as thread-cache spans may chain slots across super pages.
  PA_ALWAYS_INLINE static EncodedNextFreelistEntry*
  EmplaceAndInitForThreadCache(uintptr_t slot_start,
                               EncodedNextFreelistEntry* next) {
    auto* entry =
        new (SlotStartAddr2Ptr(slot_start)) EncodedNextFreelistEntry(next);
    return entry;
  }

  // Emplaces the freelist entry at the beginning of the given slot span, and
  // initializes it with the given |next| pointer.
  //
  // This is for testing purposes only! |make_shadow_match| allows you to choose
  // if the shadow matches the next pointer properly or is trash.
  PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
                                                     void* next,
                                                     bool make_shadow_match) {
    new (SlotStartAddr2Ptr(slot_start))
        EncodedNextFreelistEntry(next, make_shadow_match);
  }

  void CorruptNextForTesting(uintptr_t v) {
    // We just need a value that can never be a valid pointer here.
    encoded_next_.Override(EncodedFreelistPtr::Transform(v));
  }

  // Puts `slot_size` on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache(
      size_t slot_size) const;
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const;

  PA_NOINLINE void CheckFreeList(size_t slot_size) const {
    for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
      // |GetNext()| checks freelist integrity.
    }
  }

  PA_NOINLINE void CheckFreeListForThreadCache(size_t slot_size) const {
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache<true>(slot_size)) {
      // |GetNextForThreadCache()| checks freelist integrity.
    }
  }

  PA_ALWAYS_INLINE void SetNext(EncodedNextFreelistEntry* entry) {
    // SetNext() is either called on the freelist head, when provisioning new
    // slots, or when GetNext() has been called before, no need to pass the
    // size.
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (PA_UNLIKELY(entry &&
                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

    encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = encoded_next_.Inverted();
#endif
  }

  // Zeroes out |this| before returning the slot. The pointer to this memory
  // will be returned to the user (caller of Alloc()), thus can't have internal
  // data.
  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
    encoded_next_.Override(0);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = 0;
#endif
    return SlotStartPtr2Addr(this);
  }

  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
    return !encoded_next_;
  }

 private:
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal(
      size_t slot_size,
      bool for_thread_cache) const;

  PA_ALWAYS_INLINE static bool IsSane(const EncodedNextFreelistEntry* here,
                                      const EncodedNextFreelistEntry* next,
                                      bool for_thread_cache) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks two constraints:
    // - here and next must belong to the same superpage, unless this is in the
    //   thread cache (they even always belong to the same slot span).
    // - next cannot point inside the metadata area.
    //
    // Also, the lightweight UaF detection (pointer shadow) is checked.

    uintptr_t here_address = SlotStartPtr2Addr(here);
    uintptr_t next_address = SlotStartPtr2Addr(next);

#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else
    bool shadow_ptr_ok = true;
#endif

    bool same_superpage = (here_address & kSuperPageBaseMask) ==
                          (next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP)
    bool marked_as_free_in_bitmap =
        for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
#else
    bool marked_as_free_in_bitmap = true;
#endif

    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However we don't want to
    // fetch anything from the root in this function.
    bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if (for_thread_cache) {
      return shadow_ptr_ok & not_in_metadata;
    } else {
      return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
             not_in_metadata;
    }
  }

  EncodedFreelistPtr encoded_next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a Use-after-Free, or overflow of the previous
  // allocation in the slot span.
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
  uintptr_t shadow_;
#endif
};
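// Example of how entries are chained (an illustrative sketch; |slot0| and
// |slot1| stand for hypothetical slot-start addresses inside an
// already-provisioned slot span):
//
//   auto* terminator = EncodedNextFreelistEntry::EmplaceAndInitNull(slot1);
//   auto* head = EncodedNextFreelistEntry::EmplaceAndInitForThreadCache(
//       slot0, terminator);
//   // head->GetNextForThreadCache<true>(slot_size) decodes |encoded_next_|,
//   // verifies the shadow, and returns |terminator|.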
template <bool crash_on_corruption>
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextInternal(size_t slot_size,
                                          bool for_thread_cache) const {
  // GetNext() can be called on discarded memory, in which case |encoded_next_|
  // is 0, and none of the checks apply. Don't prefetch nullptr either.
  if (IsEncodedNextPtrZero()) {
    return nullptr;
  }

  auto* ret = encoded_next_.Decode();
  // We rely on constant propagation to remove the branches coming from
  // |for_thread_cache|, since the argument is always a compile-time constant.
  if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
    if constexpr (crash_on_corruption) {
      // Put the corrupted data on the stack, it may give us more information
      // about what kind of corruption it was.
      PA_DEBUG_DATA_ON_STACK("first",
                             static_cast<size_t>(encoded_next_.encoded_));
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
      PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
      FreelistCorruptionDetected(slot_size);
    } else {
      return nullptr;
    }
  }

  // In real-world profiles, the load of |encoded_next_| above is responsible
  // for a large fraction of the allocation cost. However, we cannot anticipate
  // it enough since it is accessed right after we know its address.
  //
  // In the case of repeated allocations, we can prefetch the access that will
  // be done at the *next* allocation, which will touch *ret, so prefetch it.
  PA_PREFETCH(ret);

  return ret;
}

template <bool crash_on_corruption>
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
  return GetNextInternal<crash_on_corruption>(slot_size, true);
}

PA_ALWAYS_INLINE EncodedNextFreelistEntry* EncodedNextFreelistEntry::GetNext(
    size_t slot_size) const {
  return GetNextInternal<true>(slot_size, false);
}

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_ENCODED_NEXT_FREELIST_H_
@ -0,0 +1,129 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/extended_api.h"

#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "partition_alloc/thread_cache.h"

namespace partition_alloc::internal {

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)

namespace {

void DisableThreadCacheForRootIfEnabled(PartitionRoot* root) {
  // Some platforms don't have a thread cache, or it could already have been
  // disabled.
  if (!root || !root->settings.with_thread_cache) {
    return;
  }

  ThreadCacheRegistry::Instance().PurgeAll();
  root->settings.with_thread_cache = false;
  // Doesn't destroy the thread cache object(s). For background threads, they
  // will be collected (and free cached memory) at thread destruction
  // time. For the main thread, we leak it.
}

void EnablePartitionAllocThreadCacheForRootIfDisabled(PartitionRoot* root) {
  if (!root) {
    return;
  }
  root->settings.with_thread_cache = true;
}

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void DisablePartitionAllocThreadCacheForProcess() {
  PA_CHECK(allocator_shim::internal::PartitionAllocMalloc::
               AllocatorConfigurationFinalized());
  DisableThreadCacheForRootIfEnabled(
      allocator_shim::internal::PartitionAllocMalloc::Allocator());
  DisableThreadCacheForRootIfEnabled(
      allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

}  // namespace

#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)

ThreadAllocStats GetAllocStatsForCurrentThread() {
  ThreadCache* thread_cache = ThreadCache::Get();
  if (ThreadCache::IsValid(thread_cache)) {
    return thread_cache->thread_alloc_stats();
  }
  return {};
}

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
    PartitionRoot* root)
    : root_(root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto* regular_allocator =
      allocator_shim::internal::PartitionAllocMalloc::Allocator();
  regular_was_enabled_ =
      regular_allocator && regular_allocator->settings.with_thread_cache;

  if (root_ != regular_allocator) {
    // Another |root| is ThreadCache's PartitionRoot. Need to disable
    // thread cache for the process.
    DisablePartitionAllocThreadCacheForProcess();
    EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
    // Replace ThreadCache's PartitionRoot.
    ThreadCache::SwapForTesting(root_);
  } else {
    if (!regular_was_enabled_) {
      EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
      ThreadCache::SwapForTesting(root_);
    }
  }
#else
  PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
  EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
  ThreadCache::SwapForTesting(root_);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  PA_CHECK(ThreadCache::Get());
}

ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto* regular_allocator =
      allocator_shim::internal::PartitionAllocMalloc::Allocator();
  bool regular_enabled =
      regular_allocator && regular_allocator->settings.with_thread_cache;

  if (regular_was_enabled_) {
    if (!regular_enabled) {
      // Need to re-enable ThreadCache for the process.
      EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
      // In this case, |regular_allocator| must be ThreadCache's root.
      ThreadCache::SwapForTesting(regular_allocator);
    } else {
      // ThreadCache is enabled for the process, but we need to be
      // careful about ThreadCache's PartitionRoot. If it is different from
      // |regular_allocator|, we need to invoke SwapForTesting().
      if (regular_allocator != root_) {
        ThreadCache::SwapForTesting(regular_allocator);
      }
    }
  } else {
    // ThreadCache for all processes was disabled.
    DisableThreadCacheForRootIfEnabled(regular_allocator);
    ThreadCache::SwapForTesting(nullptr);
  }
#else
  // First, disable the test thread cache we have.
  DisableThreadCacheForRootIfEnabled(root_);

  ThreadCache::SwapForTesting(nullptr);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)

}  // namespace partition_alloc::internal
@ -0,0 +1,42 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_

#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/thread_cache.h"

namespace partition_alloc::internal {
// Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread();

// Creates a scope for testing which:
// - if the given |root| is a default malloc root for the entire process,
//   enables the thread cache for the entire process.
//   (This may happen if UsePartitionAllocAsMalloc is enabled.)
// - otherwise, disables the thread cache for the entire process, and
//   replaces it with a thread cache for |root|.
// This class is unsafe to use if there are multiple threads running
// in the process.
class ThreadCacheProcessScopeForTesting {
 public:
  explicit ThreadCacheProcessScopeForTesting(PartitionRoot* root);
  ~ThreadCacheProcessScopeForTesting();

  ThreadCacheProcessScopeForTesting() = delete;

 private:
  PartitionRoot* root_ = nullptr;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  bool regular_was_enabled_ = false;
#endif
};
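// Example usage (an illustrative sketch; assumes a test-owned PartitionRoot
// named |root|):
//
//   {
//     ThreadCacheProcessScopeForTesting scope(&root);
//     // Allocations on this thread now go through |root|'s thread cache.
//   }  // Scope exit restores the previous process-wide thread cache state.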
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_EXTENDED_API_H_
@ -0,0 +1,101 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header provides a type-safe way of storing OR-combinations of enum
// values.
//
// The traditional C++ approach for storing OR-combinations of enum values is to
// use an int or unsigned int variable. The inconvenience with this approach is
// that there's no type checking at all; any enum value can be OR'd with any
// other enum value and passed on to a function that takes an int or unsigned
// int.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_

#include <type_traits>

namespace partition_alloc::internal {
// Returns `T` if and only if `EnumType` is a scoped enum.
template <typename EnumType, typename T = EnumType>
using IfEnum = std::enable_if_t<
    std::is_enum_v<EnumType> &&
        !std::is_convertible_v<EnumType, std::underlying_type_t<EnumType>>,
    T>;

// We assume `EnumType` defines `kMaxValue` which has the largest value and all
// powers of two are represented in `EnumType`.
template <typename EnumType>
constexpr inline EnumType kAllFlags = static_cast<IfEnum<EnumType>>(
    (static_cast<std::underlying_type_t<EnumType>>(EnumType::kMaxValue) << 1) -
    1);
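// Worked example (with a hypothetical enum, for illustration only):
//
//   enum class Demo : uint8_t { kA = 1, kB = 2, kMaxValue = kB };
//
// kAllFlags<Demo> is then static_cast<Demo>((2 << 1) - 1) == static_cast<Demo>(3),
// i.e. the combination of kA and kB.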
template <typename EnumType>
constexpr inline IfEnum<EnumType, bool> AreValidFlags(EnumType flags) {
  const auto raw_flags = static_cast<std::underlying_type_t<EnumType>>(flags);
  const auto raw_all_flags =
      static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>);
  return (raw_flags & ~raw_all_flags) == 0;
}

// Checks whether `subset` is a subset of `superset`.
template <typename EnumType>
constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
                                                      EnumType subset) {
  return (superset & subset) == subset;
}

// Removes flags `target` from `from`.
template <typename EnumType>
constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
  return from & ~target;
}
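// Continuing the hypothetical `Demo` enum above (the `&` and `~` these helpers
// rely on come from PA_DEFINE_OPERATORS_FOR_FLAGS below):
//
//   ContainsFlags(Demo::kA | Demo::kB, Demo::kA)  // true
//   RemoveFlags(Demo::kA | Demo::kB, Demo::kB)    // Demo::kA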
// A macro to define binary arithmetic over `EnumType`.
// Use inside `namespace partition_alloc::internal`.
#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType)                               \
  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator&(         \
      const EnumType& lhs, const EnumType& rhs) {                             \
    return static_cast<EnumType>(                                             \
        static_cast<std::underlying_type_t<EnumType>>(lhs) &                  \
        static_cast<std::underlying_type_t<EnumType>>(rhs));                  \
  }                                                                           \
  [[maybe_unused]] inline constexpr EnumType& operator&=(                     \
      EnumType& lhs, const EnumType& rhs) {                                   \
    lhs = lhs & rhs;                                                          \
    return lhs;                                                               \
  }                                                                           \
  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator|(         \
      const EnumType& lhs, const EnumType& rhs) {                             \
    return static_cast<EnumType>(                                             \
        static_cast<std::underlying_type_t<EnumType>>(lhs) |                  \
        static_cast<std::underlying_type_t<EnumType>>(rhs));                  \
  }                                                                           \
  [[maybe_unused]] inline constexpr EnumType& operator|=(                     \
      EnumType& lhs, const EnumType& rhs) {                                   \
    lhs = lhs | rhs;                                                          \
    return lhs;                                                               \
  }                                                                           \
  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator^(         \
      const EnumType& lhs, const EnumType& rhs) {                             \
    return static_cast<EnumType>(                                             \
        static_cast<std::underlying_type_t<EnumType>>(lhs) ^                  \
        static_cast<std::underlying_type_t<EnumType>>(rhs));                  \
  }                                                                           \
  [[maybe_unused]] inline constexpr EnumType& operator^=(                     \
      EnumType& lhs, const EnumType& rhs) {                                   \
    lhs = lhs ^ rhs;                                                          \
    return lhs;                                                               \
  }                                                                           \
  [[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator~(         \
      const EnumType& val) {                                                  \
    return static_cast<EnumType>(                                             \
        static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>) &  \
        ~static_cast<std::underlying_type_t<EnumType>>(val));                 \
  }                                                                           \
  static_assert(true) /* semicolon here */
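// Usage sketch (with the hypothetical `Demo` enum as above): invoke the macro
// once per flag enum, inside this namespace, to make the bitwise operators
// available:
//
//   enum class Demo : uint8_t { kA = 1, kB = 2, kMaxValue = kB };
//   PA_DEFINE_OPERATORS_FOR_FLAGS(Demo);
//
//   Demo flags = Demo::kA | Demo::kB;
//   flags &= ~Demo::kB;  // operator~ masks with kAllFlags<Demo>, so this
//                        // leaves exactly Demo::kA.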
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FLAGS_H_
@ -0,0 +1,141 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_

#include <climits>
#include <cstdint>
#include <utility>

#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"

#if BUILDFLAG(USE_FREESLOT_BITMAP)

namespace partition_alloc::internal {

PA_ALWAYS_INLINE uintptr_t GetFreeSlotBitmapAddressForPointer(uintptr_t ptr) {
  uintptr_t super_page = ptr & kSuperPageBaseMask;
  return SuperPageFreeSlotBitmapAddr(super_page);
}

// Calculates the cell address and the offset inside the cell corresponding to
// the |slot_start|.
PA_ALWAYS_INLINE std::pair<FreeSlotBitmapCellType*, size_t>
GetFreeSlotBitmapCellPtrAndBitIndex(uintptr_t slot_start) {
  uintptr_t slot_superpage_offset = slot_start & kSuperPageOffsetMask;
  uintptr_t superpage_bitmap_start =
      GetFreeSlotBitmapAddressForPointer(slot_start);
  uintptr_t cell_addr = base::bits::AlignDown(
      superpage_bitmap_start +
          (slot_superpage_offset / kSmallestBucket) / CHAR_BIT,
      sizeof(FreeSlotBitmapCellType));
  PA_DCHECK(cell_addr < superpage_bitmap_start + kFreeSlotBitmapSize);
  size_t bit_index =
      (slot_superpage_offset / kSmallestBucket) & kFreeSlotBitmapOffsetMask;
  PA_DCHECK(bit_index < kFreeSlotBitmapBitsPerCell);
  return {reinterpret_cast<FreeSlotBitmapCellType*>(cell_addr), bit_index};
}
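// Worked example (illustrative; assumes kSmallestBucket == 16 and 64-bit
// cells): a slot at super-page offset 0x1480 maps to bit 0x1480 / 16 == 328.
// Bit 328 lives in byte 328 / 8 == 41, which AlignDown() places in the 64-bit
// cell starting at byte 40 of the bitmap (cell #5), and 328 & 63 == 8 is the
// bit index within that cell.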
// This bitmap marks a used slot as 0 and a free one as 1. This is because we
// would like to set all the slots as "used" by default to prevent allocating a
// used slot when the freelist entry is overwritten. The state of the bitmap is
// expected to be synced with the freelist (i.e. the bitmap is set to 1 if and
// only if the slot is in the freelist).

PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithAOne(size_t n) {
  return static_cast<FreeSlotBitmapCellType>(1) << n;
}

PA_ALWAYS_INLINE FreeSlotBitmapCellType CellWithTrailingOnes(size_t n) {
  return (static_cast<FreeSlotBitmapCellType>(1) << n) -
         static_cast<FreeSlotBitmapCellType>(1);
}

// Returns true if the bit corresponding to |slot_start| is used (= 0).
PA_ALWAYS_INLINE bool FreeSlotBitmapSlotIsUsed(uintptr_t slot_start) {
  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
  return (*cell & CellWithAOne(bit_index)) == 0;
}

// Marks the bit corresponding to |slot_start| as used (= 0).
PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsUsed(uintptr_t slot_start) {
  PA_CHECK(!FreeSlotBitmapSlotIsUsed(slot_start));
  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
  *cell &= ~CellWithAOne(bit_index);
}

// Marks the bit corresponding to |slot_start| as free (= 1).
PA_ALWAYS_INLINE void FreeSlotBitmapMarkSlotAsFree(uintptr_t slot_start) {
  PA_CHECK(FreeSlotBitmapSlotIsUsed(slot_start));
  auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
  *cell |= CellWithAOne(bit_index);
}

// Resets (= sets to 0) all the bits corresponding to the slot-start addresses
// within [begin_addr, end_addr). |begin_addr| has to be the beginning of a
// slot, but |end_addr| does not.
PA_ALWAYS_INLINE void FreeSlotBitmapReset(uintptr_t begin_addr,
                                          uintptr_t end_addr,
                                          uintptr_t slot_size) {
  PA_DCHECK(begin_addr <= end_addr);
  // |end_addr| has to be kSmallestBucket-aligned.
  PA_DCHECK((end_addr & (kSmallestBucket - 1)) == 0u);
  for (uintptr_t slot_start = begin_addr; slot_start < end_addr;
       slot_start += slot_size) {
    auto [cell, bit_index] = GetFreeSlotBitmapCellPtrAndBitIndex(slot_start);
    *cell &= ~CellWithAOne(bit_index);
  }

#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks if the cells that are meant to contain only unset bits are really 0.
  auto [begin_cell, begin_bit_index] =
      GetFreeSlotBitmapCellPtrAndBitIndex(begin_addr);
  auto [end_cell, end_bit_index] =
      GetFreeSlotBitmapCellPtrAndBitIndex(end_addr);

  // The bits that should be marked to 0 are |begin_bit_index|th bit of
  // |begin_cell| to |end_bit_index - 1|th bit of |end_cell|. We verify all the
  // bits are set to 0 for the cells between [begin_cell + 1, end_cell). For the
  // |begin_cell| and |end_cell|, we have to handle them separately to only
  // check the partial bits.
  //
  //  | begin_cell | |...| | end_cell |
  //  |11...100...0|0...0|...|0...0|0...01...1|
  //         ^                          ^
  //         |                          |
  //     begin_addr                  end_addr

  if (begin_cell == end_cell) {
    PA_DCHECK((*begin_cell & (~CellWithTrailingOnes(begin_bit_index) &
                              CellWithTrailingOnes(end_bit_index))) == 0u);
    return;
  }

  if (begin_bit_index != 0) {
    // Checks the bits between [begin_bit_index, kFreeSlotBitmapBitsPerCell) in
    // the begin_cell are 0.
    PA_DCHECK((*begin_cell & ~CellWithTrailingOnes(begin_bit_index)) == 0u);
    ++begin_cell;
  }

  if (end_bit_index != 0) {
    // Checks the bits between [0, end_bit_index) in the end_cell are 0.
    PA_DCHECK((*end_cell & CellWithTrailingOnes(end_bit_index)) == 0u);
  }

  for (FreeSlotBitmapCellType* cell = begin_cell; cell < end_cell; ++cell) {
    PA_DCHECK(*cell == 0u);
  }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
}

}  // namespace partition_alloc::internal

#endif  // BUILDFLAG(USE_FREESLOT_BITMAP)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_H_
@ -0,0 +1,61 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_

#include <cstdint>

#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/reservation_offset_table.h"

namespace partition_alloc::internal {

using FreeSlotBitmapCellType = uint64_t;
constexpr size_t kFreeSlotBitmapBitsPerCell =
    sizeof(FreeSlotBitmapCellType) * CHAR_BIT;
constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;

// The number of bits necessary for the bitmap is equal to the maximum number of
// slots in a super page.
constexpr size_t kFreeSlotBitmapSize =
    (kSuperPageSize / kSmallestBucket) / CHAR_BIT;
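// Worked example (illustrative; assumes a 2 MiB super page and
// kSmallestBucket == 16): 2 MiB / 16 == 131072 potential slots, one bit each,
// so the bitmap occupies 131072 / 8 == 16 KiB per super page.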

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
  return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
#else
  return 0;
#endif
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
CommittedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
  return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
#else
  return 0;
#endif
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerFreeSlotBitmap() {
  return ReservedFreeSlotBitmapSize() / PartitionPageSize();
}

#if BUILDFLAG(USE_FREESLOT_BITMAP)
PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize();
}
#endif

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_FREESLOT_BITMAP_CONSTANTS_H_
@ -0,0 +1,132 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/gwp_asan_support.h"

#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

#include "build/build_config.h"
#include "partition_alloc/freeslot_bitmap_constants.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_lock.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"

namespace partition_alloc {

// static
void* GwpAsanSupport::MapRegion(size_t slot_count,
                                std::vector<uint16_t>& free_list) {
  PA_CHECK(slot_count > 0);

  constexpr PartitionOptions kConfig = []() {
    PartitionOptions opts;
    opts.backup_ref_ptr = PartitionOptions::kEnabled;
    return opts;
  }();
  static internal::base::NoDestructor<PartitionRoot> root(kConfig);

  const size_t kSlotSize = 2 * internal::SystemPageSize();
  uint16_t bucket_index = PartitionRoot::SizeToBucketIndex(
      kSlotSize, root->GetBucketDistribution());
  auto* bucket = root->buckets + bucket_index;

  const size_t kSuperPagePayloadStartOffset =
      internal::SuperPagePayloadStartOffset(
          /* is_managed_by_normal_buckets = */ true,
          /* with_quarantine = */ false);
  PA_CHECK(kSuperPagePayloadStartOffset % kSlotSize == 0);
  const size_t kSuperPageGwpAsanSlotAreaBeginOffset =
      kSuperPagePayloadStartOffset;
  const size_t kSuperPageGwpAsanSlotAreaEndOffset =
      internal::SuperPagePayloadEndOffset();
  const size_t kSuperPageGwpAsanSlotAreaSize =
      kSuperPageGwpAsanSlotAreaEndOffset - kSuperPageGwpAsanSlotAreaBeginOffset;
  const size_t kSlotsPerSlotSpan = bucket->get_bytes_per_span() / kSlotSize;
  const size_t kSlotsPerSuperPage =
      kSuperPageGwpAsanSlotAreaSize / (kSlotsPerSlotSpan * kSlotSize);
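
  // Ceiling division, safe because `slot_count > 0` was checked above: the
  // number of super pages needed to host `slot_count` slots.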
  size_t super_page_count = 1 + ((slot_count - 1) / kSlotsPerSuperPage);
  PA_CHECK(super_page_count <=
           std::numeric_limits<size_t>::max() / kSuperPageSize);
  uintptr_t super_page_span_start;
  {
    internal::ScopedGuard locker{internal::PartitionRootLock(root.get())};
    super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
        root.get(), super_page_count, AllocFlags::kNone);

    if (!super_page_span_start) {
      return nullptr;
    }

#if defined(ARCH_CPU_64_BITS)
    // Mapping the GWP-ASan region into the lower 32 bits of the address space
    // makes it much more likely that a bad pointer dereference points into
    // our region and triggers a false positive report. We rely on the fact
    // that PA address pools are never allocated in the first 4GB due to
    // their alignment requirements.
    PA_CHECK(super_page_span_start >= (1ULL << 32));
#endif  // defined(ARCH_CPU_64_BITS)

    uintptr_t super_page_span_end =
        super_page_span_start + super_page_count * kSuperPageSize;
    PA_CHECK(super_page_span_start < super_page_span_end);

    for (uintptr_t super_page = super_page_span_start;
         super_page < super_page_span_end; super_page += kSuperPageSize) {
      auto* page_metadata =
          internal::PartitionSuperPageToMetadataArea(super_page);

      // Index 0 is invalid because it is the super page extent metadata.
      for (size_t partition_page_idx =
               1 + internal::NumPartitionPagesPerFreeSlotBitmap();
           partition_page_idx + bucket->get_pages_per_slot_span() <
           internal::NumPartitionPagesPerSuperPage();
           partition_page_idx += bucket->get_pages_per_slot_span()) {
        auto* slot_span_metadata =
            &page_metadata[partition_page_idx].slot_span_metadata;
        bucket->InitializeSlotSpanForGwpAsan(slot_span_metadata);
        auto slot_span_start =
            internal::SlotSpanMetadata::ToSlotSpanStart(slot_span_metadata);

        for (uintptr_t slot_idx = 0; slot_idx < kSlotsPerSlotSpan; ++slot_idx) {
          auto slot_start = slot_span_start + slot_idx * kSlotSize;
          PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
                                                                   kSlotSize)
              ->InitalizeForGwpAsan();
          size_t global_slot_idx = (slot_start - super_page_span_start -
                                    kSuperPageGwpAsanSlotAreaBeginOffset) /
                                   kSlotSize;
          PA_DCHECK(global_slot_idx < std::numeric_limits<uint16_t>::max());
          free_list.push_back(global_slot_idx);
          if (free_list.size() == slot_count) {
            return reinterpret_cast<void*>(
                super_page_span_start + kSuperPageGwpAsanSlotAreaBeginOffset -
                internal::SystemPageSize());  // Depends on the PA guard region
                                              // in front of the super page
                                              // payload area.
          }
        }
      }
    }
  }

  PA_NOTREACHED();
}

// static
bool GwpAsanSupport::CanReuse(uintptr_t slot_start) {
  const size_t kSlotSize = 2 * internal::SystemPageSize();
  return PartitionRoot::InSlotMetadataPointerFromSlotStartAndSize(slot_start,
                                                                  kSlotSize)
      ->CanBeReusedByGwpAsan();
}

}  // namespace partition_alloc

#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)
@ -0,0 +1,120 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_

#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_buildflags.h"

#if BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

#include <cstddef>
#include <cstdint>
#include <vector>

namespace partition_alloc {

// This class allows GWP-ASan allocations to be backed by PartitionAlloc and,
// consequently, protected by MiraclePtr.
//
// GWP-ASan mainly operates at the system memory page granularity. During
// process startup, it reserves a certain number of consecutive system pages.
//
// The standard layout is as follows:
//
//   +-------------------+--------
//   |                   | ▲   ▲
//   |   system page 0   |(a) (c)
//   |                   | ▼   ▼
//   +-------------------+--------
//   |                   | ▲   ▲
//   |   system page 1   |(b)  |
//   |                   | ▼   |
//   +-------------------+--- (d)    (a) inaccessible
//   |                   | ▲   |     (b) accessible
//   |   system page 2   |(a)  |     (c) initial guard page
//   |                   | ▼   ▼     (d) allocation slot
//   +-------------------+--------
//   |                   | ▲   ▲
//   |   system page 3   |(b)  |
//   |                   | ▼   |
//   +-------------------+--- (d)
//   |                   | ▲   |
//   |   system page 4   |(a)  |
//   |                   | ▼   ▼
//   |-------------------|--------
//   |                   | ▲   ▲
//   |        ...        |(a) (d)
//
// Unfortunately, PartitionAlloc can't provide GWP-ASan an arbitrary number of
// consecutive allocation slots. Allocations need to be grouped into 2MB super
// pages so that the allocation metadata can be easily located.
//
// Below is the new layout:
//
//   +-----------------------------------
//   |                   |          ▲   ▲
//   |   system page 0   |          |   |
//   |                   |          |   |
//   +-------------------+          |   |
//   |                   |          |   |
//   |        ...        |         (e)  |
//   |                   |          |   |
//   +-------------------+-------   |   |
//   |                   | ▲   ▲    |   |
//   |  system page k-1  |(a) (c)   |   |
//   |                   | ▼   ▼    ▼   |
//   +-------------------+----------- (f)
//   |                   | ▲   ▲        |
//   |   system page k   |(b)  |        |
//   |                   | ▼   |        |
//   +-------------------+--- (d)       |
//   |                   | ▲   |        |
//   |  system page k+1  |(a)  |        |
//   |                   | ▼   ▼        |
//   +-------------------+-----------   |
//   |                   |              |   (a) inaccessible
//   |        ...        |              |   (b) accessible
//   |                   |              ▼   (c) initial guard page
//   +-----------------------------------   (d) allocation slot
//   |                   |          ▲   ▲   (e) super page metadata
//   |   system page m   |          |   |   (f) super page
//   |                   |          |   |   (g) pseudo allocation slot
//   +-------------------+-------   |   |
//   |                   | ▲        |   |
//   |        ...        | |       (e)  |
//   |                   | |        |   |
//   +-------------------+--- (g)   |   |
//   |                   | ▲   |    |   |
//   | system page m+k-1 |(a)  |    |   |
//   |                   | ▼   ▼    ▼   |
//   +-------------------+----------- (f)
//   |                   | ▲   ▲        |
//   |  system page m+k  |(b)  |        |
//   |                   | ▼   |        |
//   +-------------------+--- (d)       |
//   |                   | ▲   |        |
//   | system page m+k+1 |(a)  |        |
//   |                   | ▼   ▼        |
//   +-------------------+-----------   |
//   |                   |              |
//   |        ...        |              |
//   |                   |              ▼
//   +-------------------+---------------
//
// This means some allocation slots will be reserved to hold PA
// metadata. We exclude these pseudo slots from the GWP-ASan free list so that
// they are never used for anything other than storing the metadata.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) GwpAsanSupport {
 public:
  static void* MapRegion(size_t slot_count, std::vector<uint16_t>& free_list);
  static bool CanReuse(uintptr_t slot_start);
};
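
// A minimal usage sketch (caller-side names are illustrative, not part of
// this API): the GWP-ASan allocator maps its slot region once at startup,
// hands out slots recorded in `free_list`, and consults `CanReuse()` before
// recycling a freed slot.
//
//   std::vector<uint16_t> free_list;
//   void* region = GwpAsanSupport::MapRegion(/*slot_count=*/128, free_list);
//   if (!region) { /* mapping failed; fall back or disable GWP-ASan */ }
//   ...
//   if (GwpAsanSupport::CanReuse(slot_start)) { /* slot may be recycled */ }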

}  // namespace partition_alloc

#endif  // BUILDFLAG(ENABLE_GWP_ASAN_SUPPORT)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_GWP_ASAN_SUPPORT_H_
@ -0,0 +1,584 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_IN_SLOT_METADATA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_IN_SLOT_METADATA_H_

#include <atomic>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <limits>

#include "build/build_config.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/tagging.h"

#if BUILDFLAG(IS_APPLE)
#include "partition_alloc/partition_alloc_base/bits.h"
#endif  // BUILDFLAG(IS_APPLE)

namespace partition_alloc::internal {

// Aligns up (on an 8B boundary) `in_slot_metadata_size` on Mac as a
// workaround for a crash. The workaround was introduced for macOS 13
// (https://crbug.com/1378822) but has since been enabled by default, because
// macOS 14 and later seem to need it too (https://crbug.com/1457756).
// Enabled on iOS as a workaround for a speculative bug in Swift's
// __StringStorage.create (https://crbug.com/327804972).
//
// Placed outside `BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)`
// intentionally to accommodate usage in contexts also outside
// this gating.
PA_ALWAYS_INLINE size_t
AlignUpInSlotMetadataSizeForApple(size_t in_slot_metadata_size) {
#if BUILDFLAG(IS_APPLE)
  return internal::base::bits::AlignUp<size_t>(in_slot_metadata_size, 8);
#else
  return in_slot_metadata_size;
#endif  // BUILDFLAG(IS_APPLE)
}

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

namespace {
// Utility functions to define a bit field.
template <typename CountType>
static constexpr CountType SafeShift(CountType lhs, int rhs) {
  return rhs >= std::numeric_limits<CountType>::digits ? 0 : lhs << rhs;
}
template <typename CountType>
struct BitField {
  static constexpr CountType None() { return CountType(0); }
  static constexpr CountType Bit(int n_th) {
    return SafeShift<CountType>(1, n_th);
  }
  // Mask with bits between `lo` and `hi` (both inclusive) set.
  static constexpr CountType Mask(int lo, int hi) {
    return (SafeShift<CountType>(1, hi + 1) - 1) &
           ~(SafeShift<CountType>(1, lo) - 1);
  }
};
}  // namespace
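
// For illustration, with CountType = uint32_t:
//   BitField<uint32_t>::Bit(0)      == 0x00000001
//   BitField<uint32_t>::Mask(1, 29) == 0x3FFFFFFE
//   BitField<uint32_t>::Bit(30)     == 0x40000000
//   BitField<uint32_t>::Bit(31)     == 0x80000000
// These four values tile the whole 32-bit word, matching the layout that
// `InSlotMetadata` builds below.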

// Special-purpose atomic bit field class mainly used by RawPtrBackupRefImpl.
// Formerly known as `PartitionRefCount`, but renamed to support usage that is
// unrelated to BRP.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) InSlotMetadata {
 public:
  // This class holds an atomic 32-bit field, `count_`, which packs 4 values:
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated           Whether or not the memory is held by the
  //                               allocator.
  //                               - 1 at construction time.
  //                               - Decreased in ReleaseFromAllocator();
  //                               - We check whether this bit is set in
  //                                 `ReleaseFromAllocator()`, and if not we
  //                                 have a double-free.
  //
  // 1-29   ptr_count              Number of raw_ptr<T>.
  //                               - Increased in Acquire()
  //                               - Decreased in Release()
  //
  // 30     request_quarantine     When set, PA will quarantine the memory in
  //                               the Scheduler-Loop quarantine.
  //                               It also extends the quarantine duration
  //                               when set after being quarantined.
  // 31     needs_mac11_malloc_    Whether the malloc_size() return value
  //        size_hack              needs to be adjusted for this allocation.
  //
  // On `BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)` builds, it holds two more
  // entries, for a total of 64 bits.
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated
  // 1-31   ptr_count
  //
  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
  // 33     needs_mac11_malloc_
  //        size_hack
  // 34     request_quarantine
  //
  // 35-63  unprotected_ptr_count  Number of
  //                               raw_ptr<T, DisableDanglingPtrDetection>.
  //                               - Increased in AcquireFromUnprotectedPtr().
  //                               - Decreased in ReleaseFromUnprotectedPtr().
  //
  // The allocation is reclaimed if all of:
  // - |is_allocated|
  // - |ptr_count|
  // - |unprotected_ptr_count|
  // are zero.
  //
  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
  // |dangling_detected| is set and the error is reported via
  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will
  // be called when the last raw_ptr<> is released.
#if !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint32_t;
  static constexpr CountType kMemoryHeldByAllocatorBit =
      BitField<CountType>::Bit(0);
  static constexpr CountType kPtrCountMask = BitField<CountType>::Mask(1, 29);
  static constexpr CountType kRequestQuarantineBit =
      BitField<CountType>::Bit(30);
  static constexpr CountType kNeedsMac11MallocSizeHackBit =
      BitField<CountType>::Bit(31);
  static constexpr CountType kDanglingRawPtrDetectedBit =
      BitField<CountType>::None();
  static constexpr CountType kUnprotectedPtrCountMask =
      BitField<CountType>::None();
#else   // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint64_t;
  static constexpr auto kMemoryHeldByAllocatorBit = BitField<CountType>::Bit(0);
  static constexpr auto kPtrCountMask = BitField<CountType>::Mask(1, 31);
  static constexpr auto kDanglingRawPtrDetectedBit =
      BitField<CountType>::Bit(32);
  static constexpr auto kNeedsMac11MallocSizeHackBit =
      BitField<CountType>::Bit(33);
  static constexpr CountType kRequestQuarantineBit =
      BitField<CountType>::Bit(34);
  static constexpr auto kUnprotectedPtrCountMask =
      BitField<CountType>::Mask(35, 63);
#endif  // !BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

  // Quick check to assert these masks do not overlap.
  static_assert((kMemoryHeldByAllocatorBit + kPtrCountMask +
                 kUnprotectedPtrCountMask + kDanglingRawPtrDetectedBit +
                 kRequestQuarantineBit + kNeedsMac11MallocSizeHackBit) ==
                std::numeric_limits<CountType>::max());

  static constexpr auto kPtrInc =
      SafeShift<CountType>(1, std::countr_zero(kPtrCountMask));
  static constexpr auto kUnprotectedPtrInc =
      SafeShift<CountType>(1, std::countr_zero(kUnprotectedPtrCountMask));
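
  // e.g. with the 32-bit layout above, std::countr_zero(kPtrCountMask) == 1,
  // so kPtrInc == 2: adding kPtrInc to `count_` bumps `ptr_count` by exactly
  // one without touching the `is_allocated` bit.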

  PA_ALWAYS_INLINE explicit InSlotMetadata(bool needs_mac11_malloc_size_hack);

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required
  // before the memory gets freed, necessitating an acquire/release barrier
  // before freeing the memory.
  //
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  //
  // FYI: the document below surveys the assembly produced by the compiler on
  // every platform, in particular the uint64_t fetch_add on 32-bit CPUs.
  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
  PA_ALWAYS_INLINE void Acquire() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_add(kPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kPtrCountMask) != kPtrCountMask);
  }

  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();
    CountType old_count =
        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
             kUnprotectedPtrCountMask);
#else
    Acquire();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  PA_ALWAYS_INLINE bool Release() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kPtrCountMask);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // If a dangling raw_ptr<> was detected, report it.
    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
                    kDanglingRawPtrDetectedBit)) {
      partition_alloc::internal::DanglingRawPtrReleased(
          reinterpret_cast<uintptr_t>(this));
    }
#endif

    return ReleaseCommon(old_count - kPtrInc);
  }

  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();

    CountType old_count =
        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kUnprotectedPtrCountMask);

    return ReleaseCommon(old_count - kUnprotectedPtrInc);
#else
    return Release();
#endif
  }

  // `PreReleaseFromAllocator()` performs part of what
  // `ReleaseFromAllocator()` does, in a way that supports multiple calls.
  // This function can be used when an allocation is sent to quarantine, to
  // perform dangling `raw_ptr` checks before quarantine, not after.
  PA_ALWAYS_INLINE void PreReleaseFromAllocator() {
    CheckCookieIfSupported();
    CheckDanglingPointersOnFree(count_.load(std::memory_order_relaxed));
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
    CheckCookieIfSupported();

    // TODO(bartekn): Make the double-free check more effective. Once freed,
    // the in-slot metadata is overwritten by an encoded freelist-next pointer.
    CountType old_count =
        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);

    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
      DoubleFreeOrCorruptionDetected(old_count);
    }

    // Release memory when no raw_ptr<> exists anymore:
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
    if (PA_LIKELY((old_count & mask) == 0)) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // The allocation is about to get freed, so clear the cookie.
      ClearCookieIfSupported();
      return true;
    }

    CheckDanglingPointersOnFree(old_count);
    return false;
  }

  // "IsAlive" means allocated and not yet freed. "KnownRefs" refers to
  // raw_ptr<T> references. There may be other references from raw pointers or
  // unique_ptr, but we have no way of tracking them, so we hope for the best.
  // To summarize, the function returns whether we believe the allocation can
  // be safely freed.
  PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
    CheckCookieIfSupported();
    static constexpr CountType mask =
        kMemoryHeldByAllocatorBit | kPtrCountMask | kUnprotectedPtrCountMask;
    return (count_.load(std::memory_order_acquire) & mask) ==
           kMemoryHeldByAllocatorBit;
  }

  PA_ALWAYS_INLINE bool IsAlive() {
    bool alive =
        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
    if (alive) {
      CheckCookieIfSupported();
    }
    return alive;
  }

  // Called when a raw_ptr is not banning dangling ptrs, but the user still
  // wants to ensure the pointer is not currently dangling. This is currently
  // used in UnretainedWrapper to make sure callbacks are not invoked with
  // dangling pointers. If such a raw_ptr exists but the allocation is no
  // longer alive, then we have a dangling pointer to a dead object.
  PA_ALWAYS_INLINE void ReportIfDangling() {
    if (!IsAlive()) {
      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
  }

  // Request to quarantine this allocation. The request might be ignored if
  // the allocation is already freed.
  PA_ALWAYS_INLINE void SetQuarantineRequest() {
    CountType old_count =
        count_.fetch_or(kRequestQuarantineBit, std::memory_order_relaxed);
    // This bit cannot be used after the memory is freed.
    PA_DCHECK(old_count & kMemoryHeldByAllocatorBit);
  }

  // Get and clear out the quarantine request.
  PA_ALWAYS_INLINE bool PopQuarantineRequest() {
    CountType old_count =
        count_.fetch_and(~kRequestQuarantineBit, std::memory_order_acq_rel);
    // This bit cannot be used after the memory is freed.
    PA_DCHECK(old_count & kMemoryHeldByAllocatorBit);
    return old_count & kRequestQuarantineBit;
  }

  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
  // make sure the `raw_ptr<T>` release operation will never attempt to call
  // the PA `free` on such a slot. GWP-ASan takes the extra reference into
  // account when determining whether the slot can be reused.
  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    brp_cookie_ = CalculateCookie();
#endif
    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
                 std::memory_order_release);
  }

  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
    static constexpr CountType mask = kPtrCountMask | kUnprotectedPtrCountMask;
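    // `== kPtrInc` means the only remaining reference is the extra one
    // assigned in `InitalizeForGwpAsan()`, i.e. no live raw_ptr<T> remains.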
    return (count_.load(std::memory_order_acquire) & mask) == kPtrInc;
  }

  bool NeedsMac11MallocSizeHack() {
    return count_.load(std::memory_order_relaxed) &
           kNeedsMac11MallocSizeHackBit;
  }

#if PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
    requested_size_ = static_cast<uint32_t>(size);
  }
  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
#endif  // PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)

 private:
  // If there are dangling raw_ptr<>, turn on the error flag and emit the
  // `DanglingPtrDetected` report once to embedders.
  PA_ALWAYS_INLINE void CheckDanglingPointersOnFree(CountType count) {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // The `kPtrCountMask` counts the number of raw_ptr<T>. It is expected to
    // be zero when there are no unexpected dangling pointers.
    if (PA_LIKELY((count & kPtrCountMask) == 0)) {
      return;
    }

    // Two events are sent to embedders:
    // 1. `DanglingRawPtrDetected` - Here
    // 2. `DanglingRawPtrReleased` - In Release().
    //
    // The `dangling_detected` bit signals we must emit the second during
    // `Release()`.
    CountType old_count =
        count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);

    // This function supports multiple calls. `DanglingRawPtrDetected` must be
    // called only once. So only the first caller setting the bit can continue.
    if ((old_count & kDanglingRawPtrDetectedBit) ==
        kDanglingRawPtrDetectedBit) {
      return;
    }

    partition_alloc::internal::DanglingRawPtrDetected(
        reinterpret_cast<uintptr_t>(this));
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  }

  // The common parts shared by Release() and ReleaseFromUnprotectedPtr().
  // Called after updating the ref counts; |count| is the new value of |count_|
  // set by fetch_sub. Returns true if the memory can be reclaimed.
  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
    // Do not release the memory if it is still held by any of:
    // - The allocator
    // - A raw_ptr<T>
    // - A raw_ptr<T, DisableDanglingPtrDetection>
    //
    // Assuming this raw_ptr is not dangling, the memory must still be held at
    // least by the allocator, so this is PA_LIKELY true.
    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
                            kUnprotectedPtrCountMask)))) {
      return false;  // Do not release the memory.
    }

    // In most thread-safe reference count implementations, an acquire
    // barrier is required so that all changes made to an object from other
    // threads are visible to its destructor. In our case, the destructor
    // finishes before the final `Release` call, so it shouldn't be a problem.
    // However, we will keep it as a precautionary measure.
    std::atomic_thread_fence(std::memory_order_acquire);

    // The allocation is about to get freed, so clear the cookie.
    ClearCookieIfSupported();
    return true;
  }

  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    PA_CHECK(brp_cookie_ == CalculateCookie());
#endif
  }

  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
    brp_cookie_ = 0;
#endif
  }

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }
#endif  // PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)

  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
  DoubleFreeOrCorruptionDetected(CountType count) {
    PA_DEBUG_DATA_ON_STACK("refcount", count);
    PA_NO_CODE_FOLDING();
    PA_IMMEDIATE_CRASH();
  }

  // Note that in free slots, this is overwritten by encoded freelist
  // pointer(s). The way the pointers are encoded on 64-bit little-endian
  // architectures, count_ happens to stay even, which works well with the
  // double-free detection in ReleaseFromAllocator(). Don't change the layout
  // of this class, to preserve this functionality.
  std::atomic<CountType> count_;

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif

#if PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
  uint32_t requested_size_;
#endif
};

PA_ALWAYS_INLINE InSlotMetadata::InSlotMetadata(
    bool needs_mac11_malloc_size_hack)
    : count_(kMemoryHeldByAllocatorBit |
             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE)
      ,
      brp_cookie_(CalculateCookie())
#endif
{
}

static_assert(kAlignment % alignof(InSlotMetadata) == 0,
              "kAlignment must be multiples of alignof(InSlotMetadata).");

static constexpr size_t kInSlotMetadataBufferSize = sizeof(InSlotMetadata);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) || \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 4;
#else
static constexpr size_t kInSlotMetadataSizeShift = 3;
#endif

#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) && \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 4;
#elif PA_CONFIG(IN_SLOT_METADATA_CHECK_COOKIE) || \
    PA_CONFIG(IN_SLOT_METADATA_STORE_REQUESTED_SIZE)
static constexpr size_t kInSlotMetadataSizeShift = 3;
#else
static constexpr size_t kInSlotMetadataSizeShift = 2;
#endif

#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static_assert((1 << kInSlotMetadataSizeShift) == sizeof(InSlotMetadata));

// The in-slot metadata table is tucked in the metadata region of the super
// page, and spans a single system page.
//
// We need one InSlotMetadata for each data system page in a super page. They
// take `x = sizeof(InSlotMetadata) * (kSuperPageSize / SystemPageSize())`
// space. They need to fit into a system page of metadata as sparsely as
// possible to minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`, which is equal to
// `SystemPageSize()^2 / kSuperPageSize / sizeof(InSlotMetadata)`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
GetInSlotMetadataIndexMultiplierShift() {
  return SystemPageShift() * 2 - kSuperPageShift - kInSlotMetadataSizeShift;
}
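
// For illustration only (all of these values are configuration-dependent):
// with 4 KiB system pages (SystemPageShift() == 12), 2 MiB super pages
// (kSuperPageShift == 21) and sizeof(InSlotMetadata) == 8
// (kInSlotMetadataSizeShift == 3), the shift is 12 * 2 - 21 - 3 == 0, i.e. a
// multiplier of 1. The table then holds 512 entries * 8 bytes == 4 KiB,
// exactly one system page of metadata.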

PA_ALWAYS_INLINE InSlotMetadata* InSlotMetadataPointer(
    uintptr_t slot_start,
    size_t slot_size,
    bool in_slot_metadata_in_same_slot) {
  // In the "previous slot" mode, in-slot metadata entries that would be on a
  // different page than their corresponding slot are instead placed in the
  // super page metadata area. This is done so that they don't interfere with
  // discarding of data pages.
  //
  // In the "same slot" mode, we have a handful of other issues:
  // 1. GWP-ASan uses 2-page slots and wants the 2nd page to be inaccessible,
  //    so putting an in-slot metadata there is a no-go.
  // 2. When a direct map is reallocated in-place, its `slot_size` may change
  //    and pages can be (de)committed. This would force in-slot metadata
  //    relocation, which in turn could cause a race with in-slot metadata
  //    access.
  // 3. For single-slot spans, the unused pages between `GetUtilizedSlotSize()`
  //    and `slot_size` may be discarded, thus interfering with the in-slot
  //    metadata.
  // All of the above happen to have `slot_start` at the page boundary, so we
  // can reuse the "previous slot" mode code.
  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
    uintptr_t refcount_address =
        slot_start + (in_slot_metadata_in_same_slot ? slot_size : 0) -
        sizeof(InSlotMetadata);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_address % alignof(InSlotMetadata) == 0);
#endif
    // In theory, no need to MTE-tag in the "previous slot" mode, because
    // in-slot metadata isn't protected by MTE. But it doesn't hurt to do so,
    // and helps us avoid a branch (plus, can't easily #include
    // partition_root.h here, due to cyclic dependencies).
    // TODO(bartekn): Plumb the tag from the callers, so that it can be
    // included in the calculations, and not re-read from memory.
    return static_cast<InSlotMetadata*>(TagAddr(refcount_address));
  } else {
    // No need to MTE-tag, as the metadata region isn't protected by MTE.
    InSlotMetadata* table_base = reinterpret_cast<InSlotMetadata*>(
        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
                   << GetInSlotMetadataIndexMultiplierShift();
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(InSlotMetadata) * index <= SystemPageSize());
#endif
    return table_base + index;
  }
}

static_assert(sizeof(InSlotMetadata) <= kInSlotMetadataBufferSize,
              "InSlotMetadata should fit into the in-slot buffer.");

#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

static constexpr size_t kInSlotMetadataBufferSize = 0;

#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

constexpr size_t kInSlotMetadataSizeAdjustment = kInSlotMetadataBufferSize;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_IN_SLOT_METADATA_H_
@ -0,0 +1,44 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/internal_allocator.h"

namespace partition_alloc::internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PartitionRoot& InternalAllocatorRoot() {
  static internal::base::NoDestructor<PartitionRoot> allocator([]() {
    // Disable features using the internal root to avoid reentrancy issues.
    PartitionOptions opts;
    opts.thread_cache = PartitionOptions::kDisabled;
    opts.scheduler_loop_quarantine = PartitionOptions::kDisabled;
    return opts;
  }());

  return *allocator;
}

// static
void* InternalPartitionAllocated::operator new(size_t count) {
  return InternalAllocatorRoot().Alloc<AllocFlags::kNoHooks>(count);
}
// static
void* InternalPartitionAllocated::operator new(size_t count,
                                               std::align_val_t alignment) {
  return InternalAllocatorRoot().AlignedAlloc<AllocFlags::kNoHooks>(
      static_cast<size_t>(alignment), count);
}
// static
void InternalPartitionAllocated::operator delete(void* ptr) {
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
// static
void InternalPartitionAllocated::operator delete(void* ptr, std::align_val_t) {
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}

// A deleter for `std::unique_ptr<T>`.
void InternalPartitionDeleter::operator()(void* ptr) const {
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
}  // namespace partition_alloc::internal
@ -0,0 +1,61 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_H_

#include <new>
#include <type_traits>

#include "partition_alloc/internal_allocator_forward.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_root.h"

// The internal allocator provides the heap allocations needed to implement
// PartitionAlloc's own features. Since it is itself PartitionAlloc with a
// minimal configuration, PA's core implementation must not use it, to avoid
// reentrancy issues. Also don't use it to satisfy the very first PA-E
// allocation of the process.

namespace partition_alloc::internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PartitionRoot& InternalAllocatorRoot();

// A class that meets the C++ named requirement Allocator.
template <typename T>
InternalAllocator<T>::value_type* InternalAllocator<T>::allocate(
    std::size_t count) {
  PA_CHECK(count <=
           std::numeric_limits<std::size_t>::max() / sizeof(value_type));
  return static_cast<value_type*>(
      InternalAllocatorRoot().Alloc<AllocFlags::kNoHooks>(count *
                                                          sizeof(value_type)));
}
template <typename T>
void InternalAllocator<T>::deallocate(value_type* ptr, std::size_t) {
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}

// Create an object on the heap in the internal partition.
template <typename T, typename... Args>
T* ConstructAtInternalPartition(Args&&... args) {
  auto* memory = static_cast<T*>(
      InternalAllocatorRoot().Alloc<AllocFlags::kNoHooks>(sizeof(T)));
  return new (memory) T(std::forward<Args>(args)...);
}

// Destroy an object on the heap in the internal partition.
template <typename T>
void DestroyAtInternalPartition(T* ptr) {
  // Destroying an array is not supported.
  static_assert(!std::is_array_v<T>);
  ptr->~T();
  InternalAllocatorRoot().Free<FreeFlags::kNoHooks>(ptr);
}
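
// A minimal usage sketch (`Foo` is an illustrative PA-internal type, not a
// real one):
//
//   std::vector<int, InternalAllocator<int>> v;  // backing store comes from
//                                                // the internal partition
//   Foo* foo = ConstructAtInternalPartition<Foo>(/*args...*/);
//   DestroyAtInternalPartition(foo);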

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_H_
@ -0,0 +1,80 @@
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_FORWARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_FORWARD_H_

#include <new>
#include <type_traits>

#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_forward.h"

// The internal allocator provides the heap allocations needed to implement
// PartitionAlloc's own features. Since it is itself PartitionAlloc with a
// minimal configuration, PA's core implementation must not use it, to avoid
// reentrancy issues. Also don't use it to satisfy the very first PA-E
// allocation of the process.

namespace partition_alloc::internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PartitionRoot& InternalAllocatorRoot();

// A class that meets the C++ named requirement Allocator.
template <typename T>
class InternalAllocator {
 public:
  using value_type = T;
  using is_always_equal = std::true_type;

  InternalAllocator() = default;

  template <typename U>
  InternalAllocator(const InternalAllocator<U>&) {}  // NOLINT

  template <typename U>
  InternalAllocator& operator=(const InternalAllocator<U>&) {
    return *this;
  }

  template <typename U>
  bool operator==(const InternalAllocator<U>&) {
    // InternalAllocator<T> can free allocations made by InternalAllocator<U>.
    return true;
  }

  value_type* allocate(std::size_t count);

  void deallocate(value_type* ptr, std::size_t);
};

// Inherit this to make a class allocated on the internal partition.
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionAllocated {
  static void* operator new(size_t count);
  static void* operator new(size_t count, std::align_val_t alignment);
  // Though we do not forward placement new, we need to define this explicitly
  // to allow it.
  static void* operator new(std::size_t, void* ptr) { return ptr; }
  static void operator delete(void* ptr);
  static void operator delete(void* ptr, std::align_val_t);
};

// Create an object on the heap in the internal partition.
template <typename T, typename... Args>
T* ConstructAtInternalPartition(Args&&... args);

// Destroy an object on the heap in the internal partition.
template <typename T>
void DestroyAtInternalPartition(T* ptr);

// A deleter for `std::unique_ptr<T>`.
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) InternalPartitionDeleter final {
  void operator()(void* ptr) const;
};
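
// e.g. `std::unique_ptr<T, InternalPartitionDeleter>`. Note that, as defined
// above, the deleter only frees the memory and does not run `~T()`, so it
// suits trivially destructible types or callers that destroy manually.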

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_INTERNAL_ALLOCATOR_FORWARD_H_
@ -0,0 +1,136 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/lightweight_quarantine.h"

#include "partition_alloc/internal_allocator.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"

namespace partition_alloc::internal {

LightweightQuarantineBranch LightweightQuarantineRoot::CreateBranch(
    bool lock_required) {
  return LightweightQuarantineBranch(*this, lock_required);
}

LightweightQuarantineBranch::LightweightQuarantineBranch(Root& root,
                                                         bool lock_required)
    : root_(root), lock_required_(lock_required) {}

LightweightQuarantineBranch::LightweightQuarantineBranch(
    LightweightQuarantineBranch&& b)
    : root_(b.root_),
      lock_required_(b.lock_required_),
      slots_(std::move(b.slots_)),
      branch_size_in_bytes_(b.branch_size_in_bytes_) {
  b.branch_size_in_bytes_ = 0;
}

LightweightQuarantineBranch::~LightweightQuarantineBranch() {
  Purge();
  slots_.clear();
}

bool LightweightQuarantineBranch::Quarantine(void* object,
                                             SlotSpanMetadata* slot_span,
                                             uintptr_t slot_start) {
  const auto usable_size = root_.allocator_root_.GetSlotUsableSize(slot_span);

  const size_t capacity_in_bytes =
      root_.capacity_in_bytes_.load(std::memory_order_relaxed);

  {
    ConditionalScopedGuard guard(lock_required_, lock_);

    // Note that `root_` is _not_ locked while `this` is locked with `lock_`,
    // so there is no synchronization between `root_` and `this` (branch)
    // except for the single-branch use case.
    const size_t root_size_in_bytes =
        root_.size_in_bytes_.load(std::memory_order_relaxed);
    // Because there is no synchronization, `branch_size_in_bytes_` may be
    // larger than `root_size_in_bytes`.
    const size_t size_in_bytes_held_by_others =
        branch_size_in_bytes_ < root_size_in_bytes
            ? root_size_in_bytes - branch_size_in_bytes_
            : 0;
    if (capacity_in_bytes < size_in_bytes_held_by_others + usable_size) {
      // Even if this branch dequarantines all entries held by it, this entry
      // cannot fit within the capacity.
      root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
      root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
      return false;
    }

    // Dequarantine some entries as required.
    PurgeInternal(capacity_in_bytes - usable_size);

    // Update stats (locked).
    branch_size_in_bytes_ += usable_size;
    root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
    // `root_.capacity_in_bytes_` is _not_ a hard limit, plus there is no
    // synchronization between the root and branch, so `branch_size_in_bytes_`
    // may be larger than `root_.capacity_in_bytes_` at this point.

    slots_.emplace_back(slot_start, usable_size);

    // Swap randomly so that the quarantine list remains shuffled.
    // This is not uniformly random, but sufficiently random.
    const size_t random_index = random_.RandUint32() % slots_.size();
    std::swap(slots_[random_index], slots_.back());
  }

  // Update stats (not locked).
  root_.count_.fetch_add(1, std::memory_order_relaxed);
  root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
  root_.cumulative_size_in_bytes_.fetch_add(usable_size,
                                            std::memory_order_relaxed);
  return true;
}

bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
  ConditionalScopedGuard guard(lock_required_, lock_);
  uintptr_t slot_start = root_.allocator_root_.ObjectToSlotStart(object);
  for (const auto& slot : slots_) {
    if (slot.slot_start == slot_start) {
      return true;
    }
  }
  return false;
}

void LightweightQuarantineBranch::PurgeInternal(size_t target_size_in_bytes) {
  size_t size_in_bytes = root_.size_in_bytes_.load(std::memory_order_relaxed);
  int64_t freed_count = 0;
  int64_t freed_size_in_bytes = 0;

  // Dequarantine some entries as required.
  while (!slots_.empty() && target_size_in_bytes < size_in_bytes) {
    // As quarantined entries are shuffled, picking the last entry is
    // equivalent to picking a random entry.
    const auto& to_free = slots_.back();
    size_t to_free_size = to_free.usable_size;

    auto* slot_span = SlotSpanMetadata::FromSlotStart(to_free.slot_start);
    void* object = root_.allocator_root_.SlotStartToObject(to_free.slot_start);
    PA_DCHECK(slot_span == SlotSpanMetadata::FromObject(object));

    PA_DCHECK(to_free.slot_start);
    root_.allocator_root_.FreeNoHooksImmediate(object, slot_span,
                                               to_free.slot_start);

    freed_count++;
    freed_size_in_bytes += to_free_size;
    size_in_bytes -= to_free_size;

    slots_.pop_back();
  }

  branch_size_in_bytes_ -= freed_size_in_bytes;
  root_.size_in_bytes_.fetch_sub(freed_size_in_bytes,
                                 std::memory_order_relaxed);
  root_.count_.fetch_sub(freed_count, std::memory_order_relaxed);
}

}  // namespace partition_alloc::internal
@ -0,0 +1,189 @@
|
|||||||
|
// Copyright 2023 The Chromium Authors
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// Lightweight Quarantine (LQ) provides a low-cost quarantine mechanism with
// the following characteristics:
//
// - Built on PartitionAlloc: only supports allocations in a known root
// - As fast as PA: LQ just defers `Free()` handling and may benefit from the
//   thread cache etc.
// - Thread-safe
// - No allocation-time information: triggered on `Free()`
// - Doesn't use quarantined objects' payload - available for zapping
// - Doesn't allocate heap memory
// - Flexible enough to support several applications
//
// `LightweightQuarantineRoot` represents one quarantine system
// (e.g. scheduler loop quarantine).
// `LightweightQuarantineBranch` provides the quarantine request interface.
// It belongs to a `LightweightQuarantineRoot` and there can be multiple
// instances (e.g. one per thread). Having one branch per thread removes the
// need for locking and makes quarantine faster.
// ┌────────────────────────────┐
// │PartitionRoot               │
// └┬──────────────────────────┬┘
// ┌▽────────────────────────┐┌▽────────────────────┐
// │LQRoot 1                 ││LQRoot 2             │
// └┬───────────┬───────────┬┘└──────────────┬──┬──┬┘
// ┌▽─────────┐┌▽─────────┐┌▽─────────┐      ▽  ▽  ▽
// │LQBranch 1││LQBranch 2││LQBranch 3│
// └──────────┘└──────────┘└──────────┘
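//
// A minimal usage sketch (hedged, not a prescribed flow; `root` is an
// existing PartitionRoot owned by the embedder, and the capacity and the
// `object`/`slot_span`/`slot_start` values are illustrative):
//
//   LightweightQuarantineRoot lq_root(root, /*capacity_in_bytes=*/1 << 20);
//   // Typically one branch per thread, in which case no locking is needed:
//   LightweightQuarantineBranch branch =
//       lq_root.CreateBranch(/*lock_required=*/false);
//   // On the free path, quarantine instead of freeing immediately:
//   branch.Quarantine(object, slot_span, slot_start);
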
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_

#include <array>
#include <atomic>
#include <cstdint>
#include <limits>
#include <memory>
#include <type_traits>
#include <vector>

#include "partition_alloc/internal_allocator_forward.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/rand_util.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_lock.h"
#include "partition_alloc/partition_stats.h"

namespace partition_alloc {

struct PartitionRoot;
struct LightweightQuarantineStats;

namespace internal {

class LightweightQuarantineBranch;

class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineRoot {
 public:
  explicit LightweightQuarantineRoot(PartitionRoot& allocator_root,
                                     size_t capacity_in_bytes = 0)
      : allocator_root_(allocator_root),
        capacity_in_bytes_(capacity_in_bytes) {}

  LightweightQuarantineBranch CreateBranch(bool lock_required = true);

  void AccumulateStats(LightweightQuarantineStats& stats) const {
    stats.count += count_.load(std::memory_order_relaxed);
    stats.size_in_bytes += size_in_bytes_.load(std::memory_order_relaxed);
    stats.cumulative_count += cumulative_count_.load(std::memory_order_relaxed);
    stats.cumulative_size_in_bytes +=
        cumulative_size_in_bytes_.load(std::memory_order_relaxed);
    stats.quarantine_miss_count +=
        quarantine_miss_count_.load(std::memory_order_relaxed);
  }

  size_t GetCapacityInBytes() const {
    return capacity_in_bytes_.load(std::memory_order_relaxed);
  }
  void SetCapacityInBytes(size_t capacity) {
    capacity_in_bytes_.store(capacity, std::memory_order_relaxed);
    // `size_in_bytes` may exceed `capacity_in_bytes` here.
    // Each branch will try to shrink its quarantine later.
  }

 private:
  PartitionRoot& allocator_root_;
  std::atomic_size_t capacity_in_bytes_;
  // Total size of quarantined entries, capped by `capacity_in_bytes`.
  std::atomic_size_t size_in_bytes_ = 0;

  // Stats.
  std::atomic_size_t count_ = 0;  // Number of quarantined entries.
  std::atomic_size_t cumulative_count_ = 0;
  std::atomic_size_t cumulative_size_in_bytes_ = 0;
  std::atomic_size_t quarantine_miss_count_ = 0;

  friend class LightweightQuarantineBranch;
};

class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LightweightQuarantineBranch {
 public:
  using Root = LightweightQuarantineRoot;

  LightweightQuarantineBranch(const LightweightQuarantineBranch&) = delete;
  LightweightQuarantineBranch(LightweightQuarantineBranch&& b);
  ~LightweightQuarantineBranch();

  // Quarantines an object, recording the given slot information. If the
  // object is too large to quarantine, this may return `false`, meaning the
  // quarantine request failed and the object was freed immediately.
  // Otherwise, returns `true`.
  bool Quarantine(void* object,
                  SlotSpanMetadata* slot_span,
                  uintptr_t slot_start);

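  // For instance (a hedged sketch; `branch`, `object`, `slot_span` and
  // `slot_start` are the caller's, coming from the allocator's free path):
  //
  //   if (!branch.Quarantine(object, slot_span, slot_start)) {
  //     // Too large to quarantine: the object has already been freed.
  //   }
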
  // Dequarantines all entries **held by this branch**.
  // Another branch may still hold entries; those remain untouched.
  void Purge() {
    ConditionalScopedGuard guard(lock_required_, lock_);
    PurgeInternal(0);
  }

  // Determines whether this branch holds the given object.
  bool IsQuarantinedForTesting(void* object);

  Root& GetRoot() { return root_; }

 private:
  LightweightQuarantineBranch(Root& root, bool lock_required);

  // Tries to dequarantine entries so that
  //   root_.size_in_bytes_ <= target_size_in_bytes
  // holds. This branch may be unable to satisfy the request, as it only
  // controls its own entries. If you need to guarantee the constraint, call
  // `Purge()` on every branch in sequence, synchronously.
  void PurgeInternal(size_t target_size_in_bytes)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  Root& root_;

  bool lock_required_;
  Lock lock_;

  // A utility that acquires the lock only if a condition is met.
  class PA_SCOPED_LOCKABLE ConditionalScopedGuard {
   public:
    explicit ConditionalScopedGuard(bool condition, Lock& lock)
        PA_EXCLUSIVE_LOCK_FUNCTION(lock)
        : condition_(condition), lock_(lock) {
      if (condition_) {
        lock_.Acquire();
      }
    }
    ~ConditionalScopedGuard() PA_UNLOCK_FUNCTION() {
      if (condition_) {
        lock_.Release();
      }
    }

   private:
    const bool condition_;
    Lock& lock_;
  };

  // Non-cryptographic random number generator.
  // Thread-unsafe, so guarded by `lock_`.
  base::InsecureRandomGenerator random_ PA_GUARDED_BY(lock_);

  // `slots_` holds the quarantined entries.
  struct QuarantineSlot {
    uintptr_t slot_start;
    size_t usable_size;
  };
  std::vector<QuarantineSlot, InternalAllocator<QuarantineSlot>> slots_
      PA_GUARDED_BY(lock_);
  size_t branch_size_in_bytes_ PA_GUARDED_BY(lock_) = 0;

  friend class LightweightQuarantineRoot;
};

}  // namespace internal

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_LIGHTWEIGHT_QUARANTINE_H_
@ -0,0 +1,98 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/memory_reclaimer.h"

#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"

#if BUILDFLAG(USE_STARSCAN)
#include "partition_alloc/starscan/pcscan.h"
#endif

namespace partition_alloc {

// static
MemoryReclaimer* MemoryReclaimer::Instance() {
  static internal::base::NoDestructor<MemoryReclaimer> instance;
  return instance.get();
}

void MemoryReclaimer::RegisterPartition(PartitionRoot* partition) {
  internal::ScopedGuard lock(lock_);
  PA_DCHECK(partition);
  auto it_and_whether_inserted = partitions_.insert(partition);
  PA_DCHECK(it_and_whether_inserted.second);
}

void MemoryReclaimer::UnregisterPartition(PartitionRoot* partition) {
  internal::ScopedGuard lock(lock_);
  PA_DCHECK(partition);
  size_t erased_count = partitions_.erase(partition);
  PA_DCHECK(erased_count == 1u);
}

MemoryReclaimer::MemoryReclaimer() = default;
MemoryReclaimer::~MemoryReclaimer() = default;

void MemoryReclaimer::ReclaimAll() {
  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
                         PurgeFlags::kDiscardUnusedSystemPages |
                         PurgeFlags::kAggressiveReclaim;
  Reclaim(kFlags);
}

void MemoryReclaimer::ReclaimNormal() {
  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
                         PurgeFlags::kDiscardUnusedSystemPages;
  Reclaim(kFlags);
}

void MemoryReclaimer::Reclaim(int flags) {
  internal::ScopedGuard lock(
      lock_);  // Has to protect from concurrent (Un)Register calls.

  // PCScan quarantines freed slots. Trigger the scan first to let it call
  // FreeNoHooksImmediate on slots that pass the quarantine.
  //
  // In turn, FreeNoHooksImmediate may add slots to the thread cache. Purge it
  // next so that those slots are actually freed. (This is done synchronously
  // only for the current thread.)
  //
  // Lastly, decommit empty slot spans and try to discard unused pages at the
  // end of the remaining active slots.
#if PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) && BUILDFLAG(USE_STARSCAN)
  {
    using PCScan = internal::PCScan;
    const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
                                     ? PCScan::InvocationMode::kForcedBlocking
                                     : PCScan::InvocationMode::kBlocking;
    PCScan::PerformScanIfNeeded(invocation_mode);
  }
#endif  // PA_CONFIG(STARSCAN_ENABLE_STARSCAN_ON_RECLAIM) &&
        // BUILDFLAG(USE_STARSCAN)

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Don't completely empty the thread cache outside of low-memory situations,
  // as there is a periodic purge which makes sure that it doesn't take too
  // much space.
  if (flags & PurgeFlags::kAggressiveReclaim) {
    ThreadCacheRegistry::Instance().PurgeAll();
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)

  for (auto* partition : partitions_) {
    partition->PurgeMemory(flags);
  }
}

void MemoryReclaimer::ResetForTesting() {
  internal::ScopedGuard lock(lock_);
  partitions_.clear();
}

}  // namespace partition_alloc
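Besides the periodic pass, an embedder can drive a one-off full reclaim under memory pressure; a minimal sketch (the pressure-callback name is illustrative, not part of PartitionAlloc):

    // Hedged sketch: respond to a critical memory-pressure signal by purging
    // thread caches and decommitting/discarding as much memory as possible.
    void OnCriticalMemoryPressure() {
      ::partition_alloc::MemoryReclaimer::Instance()->ReclaimAll();
    }
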
@ -0,0 +1,71 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_

#include <memory>
#include <set>

#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_lock.h"

namespace partition_alloc {

// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// PartitionAlloc users are responsible for scheduling and calling the
// reclamation methods with their own timers / event loops.
//
// Singleton, as this runs for as long as the process is alive, and having
// multiple instances would be wasteful.
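//
// A minimal scheduling sketch (hedged; `PostDelayedTask` stands in for
// whatever timer / event-loop primitive the embedder actually uses):
//
//   void SchedulePartitionAllocReclaim() {
//     auto* reclaimer = ::partition_alloc::MemoryReclaimer::Instance();
//     PostDelayedTask(
//         [] {
//           ::partition_alloc::MemoryReclaimer::Instance()->ReclaimNormal();
//           SchedulePartitionAllocReclaim();  // Re-arm for the next pass.
//         },
//         reclaimer->GetRecommendedReclaimIntervalInMicroseconds());
//   }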
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
 public:
  static MemoryReclaimer* Instance();

  MemoryReclaimer(const MemoryReclaimer&) = delete;
  MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;

  // Internal. Do not use.
  // Registers a partition to be tracked by the reclaimer.
  void RegisterPartition(PartitionRoot* partition);
  // Internal. Do not use.
  // Unregisters a partition so it is no longer tracked by the reclaimer.
  void UnregisterPartition(PartitionRoot* partition);

  // Triggers an explicit reclaim now to reclaim as much free memory as
  // possible. API callers need to invoke this method periodically if they
  // want to use the memory reclaimer.
  // See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
  void ReclaimNormal();

  // Returns the recommended interval at which to invoke ReclaimNormal().
  int64_t GetRecommendedReclaimIntervalInMicroseconds() {
    return internal::base::Seconds(4).InMicroseconds();
  }

  // Triggers an explicit reclaim now, reclaiming all free memory.
  void ReclaimAll();

 private:
  MemoryReclaimer();
  ~MemoryReclaimer();
  // |flags| is an OR of base::PartitionPurgeFlags.
  void Reclaim(int flags);
  void ResetForTesting();

  internal::Lock lock_;
  std::set<PartitionRoot*> partitions_ PA_GUARDED_BY(lock_);

  friend class internal::base::NoDestructor<MemoryReclaimer>;
  friend class MemoryReclaimerTest;
};

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SRC_PARTITION_ALLOC_MEMORY_RECLAIMER_H_
Some files were not shown because too many files have changed in this diff.