mirror of
https://github.com/klzgrad/naiveproxy.git
synced 2024-12-01 01:36:09 +03:00
Import chromium-106.0.5249.91
This commit is contained in:
commit
1a8a462190
39
src/.clang-format
Normal file
39
src/.clang-format
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
# Defines the Chromium style for automatic reformatting.
|
||||||
|
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
|
||||||
|
BasedOnStyle: Chromium
|
||||||
|
# This defaults to 'Auto'. Explicitly set it for a while, so that
|
||||||
|
# 'vector<vector<int> >' in existing files gets formatted to
|
||||||
|
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
|
||||||
|
# 'int>>' if the file already contains at least one such instance.)
|
||||||
|
Standard: Cpp11
|
||||||
|
|
||||||
|
# Make sure code like:
|
||||||
|
# IPC_BEGIN_MESSAGE_MAP()
|
||||||
|
# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
|
||||||
|
# IPC_END_MESSAGE_MAP()
|
||||||
|
# gets correctly indented.
|
||||||
|
MacroBlockBegin: "^\
|
||||||
|
BEGIN_MSG_MAP|\
|
||||||
|
BEGIN_MSG_MAP_EX|\
|
||||||
|
BEGIN_SAFE_MSG_MAP_EX|\
|
||||||
|
CR_BEGIN_MSG_MAP_EX|\
|
||||||
|
IPC_BEGIN_MESSAGE_MAP|\
|
||||||
|
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
|
||||||
|
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
|
||||||
|
IPC_STRUCT_BEGIN|\
|
||||||
|
IPC_STRUCT_BEGIN_WITH_PARENT|\
|
||||||
|
IPC_STRUCT_TRAITS_BEGIN|\
|
||||||
|
POLPARAMS_BEGIN|\
|
||||||
|
PPAPI_BEGIN_MESSAGE_MAP$"
|
||||||
|
MacroBlockEnd: "^\
|
||||||
|
CR_END_MSG_MAP|\
|
||||||
|
END_MSG_MAP|\
|
||||||
|
IPC_END_MESSAGE_MAP|\
|
||||||
|
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
|
||||||
|
IPC_STRUCT_END|\
|
||||||
|
IPC_STRUCT_TRAITS_END|\
|
||||||
|
POLPARAMS_END|\
|
||||||
|
PPAPI_END_MESSAGE_MAP$"
|
||||||
|
|
||||||
|
# TODO: Remove this once clang-format r357700 is rolled in.
|
||||||
|
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']
|
58
src/.gitattributes
vendored
Normal file
58
src/.gitattributes
vendored
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
# Stop Windows python license check presubmit errors by forcing LF checkout.
|
||||||
|
*.py text eol=lf
|
||||||
|
|
||||||
|
# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
|
||||||
|
/net/http/*.pins text eol=lf
|
||||||
|
|
||||||
|
# Force LF checkout for all source files
|
||||||
|
*.bin binary
|
||||||
|
*.c text eol=lf
|
||||||
|
*.cc text eol=lf
|
||||||
|
*.cpp text eol=lf
|
||||||
|
*.csv text eol=lf
|
||||||
|
*.grd text eol=lf
|
||||||
|
*.grdp text eol=lf
|
||||||
|
*.gn text eol=lf
|
||||||
|
*.gni text eol=lf
|
||||||
|
*.h text eol=lf
|
||||||
|
*.html text eol=lf
|
||||||
|
*.idl text eol=lf
|
||||||
|
*.in text eol=lf
|
||||||
|
*.inc text eol=lf
|
||||||
|
*.java text eol=lf
|
||||||
|
*.js text eol=lf
|
||||||
|
*.json text eol=lf
|
||||||
|
*.json5 text eol=lf
|
||||||
|
*.md text eol=lf
|
||||||
|
*.mm text eol=lf
|
||||||
|
*.mojom text eol=lf
|
||||||
|
*.pdf -diff
|
||||||
|
*.proto text eol=lf
|
||||||
|
*.rs text eol=lf
|
||||||
|
*.sh text eol=lf
|
||||||
|
*.sql text eol=lf
|
||||||
|
*.toml text eol=lf
|
||||||
|
*.txt text eol=lf
|
||||||
|
*.xml text eol=lf
|
||||||
|
*.xslt text eol=lf
|
||||||
|
.clang-format text eol=lf
|
||||||
|
.eslintrc.js text eol=lf
|
||||||
|
.git-blame-ignore-revs text eol=lf
|
||||||
|
.gitattributes text eol=lf
|
||||||
|
.gitignore text eol=lf
|
||||||
|
.vpython text eol=lf
|
||||||
|
codereview.settings text eol=lf
|
||||||
|
DEPS text eol=lf
|
||||||
|
ENG_REVIEW_OWNERS text eol=lf
|
||||||
|
LICENSE text eol=lf
|
||||||
|
LICENSE.* text eol=lf
|
||||||
|
MAJOR_BRANCH_DATE text eol=lf
|
||||||
|
OWNERS text eol=lf
|
||||||
|
README text eol=lf
|
||||||
|
README.* text eol=lf
|
||||||
|
WATCHLISTS text eol=lf
|
||||||
|
VERSION text eol=lf
|
||||||
|
DIR_METADATA text eol=lf
|
||||||
|
|
||||||
|
# Skip Tricium by default on files in third_party.
|
||||||
|
third_party/** -tricium
|
172
src/.gn
Normal file
172
src/.gn
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
# This file is used by the GN meta build system to find the root of the source
|
||||||
|
# tree and to set startup options. For documentation on the values set in this
|
||||||
|
# file, run "gn help dotfile" at the command line.
|
||||||
|
|
||||||
|
import("//build/dotfile_settings.gni")
|
||||||
|
import("//third_party/angle/dotfile_settings.gni")
|
||||||
|
|
||||||
|
# The location of the build configuration file.
|
||||||
|
buildconfig = "//build/config/BUILDCONFIG.gn"
|
||||||
|
|
||||||
|
# The python interpreter to use by default. On Windows, this will look
|
||||||
|
# for python3.exe and python3.bat.
|
||||||
|
script_executable = "python3"
|
||||||
|
|
||||||
|
# These arguments override the default values for items in a declare_args
|
||||||
|
# block. "gn args" in turn can override these.
|
||||||
|
#
|
||||||
|
# In general the value for a build arg in the declare_args block should be the
|
||||||
|
# default. In some cases, a DEPS-ed in project will want different defaults for
|
||||||
|
# being built as part of Chrome vs. being built standalone. In this case, the
|
||||||
|
# Chrome defaults should go here. There should be no overrides here for
|
||||||
|
# values declared in the main Chrome repository.
|
||||||
|
#
|
||||||
|
# Important note for defining defaults: This file is executed before the
|
||||||
|
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
|
||||||
|
# This means that the default_args can not depend on the platform,
|
||||||
|
# architecture, or other build parameters. If you really need that, the other
|
||||||
|
# repo should define a flag that toggles on a behavior that implements the
|
||||||
|
# additional logic required by Chrome to set the variables.
|
||||||
|
default_args = {
|
||||||
|
# TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
|
||||||
|
# a warning that "Build argument has no effect". When adding a v8 variable, it
|
||||||
|
# also needs to be defined to src/ios/BUILD.gn (respectively removed from both
|
||||||
|
# location when it is removed).
|
||||||
|
|
||||||
|
v8_enable_gdbjit = false
|
||||||
|
v8_imminent_deprecation_warnings = false
|
||||||
|
|
||||||
|
# Don't include webrtc's builtin task queue implementation.
|
||||||
|
rtc_link_task_queue_impl = false
|
||||||
|
|
||||||
|
# Don't include the iLBC audio codec.
|
||||||
|
# TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
|
||||||
|
# deps on codecs, we can remove this.
|
||||||
|
rtc_include_ilbc = false
|
||||||
|
|
||||||
|
# Changes some setup for the Crashpad build to set them to build against
|
||||||
|
# Chromium's zlib, base, etc.
|
||||||
|
crashpad_dependencies = "chromium"
|
||||||
|
|
||||||
|
# Override ANGLE's Vulkan dependencies.
|
||||||
|
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
|
||||||
|
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
|
||||||
|
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
|
||||||
|
angle_vulkan_validation_layers_dir =
|
||||||
|
"//third_party/vulkan-deps/vulkan-validation-layers/src"
|
||||||
|
|
||||||
|
# Overwrite default args declared in the Fuchsia sdk
|
||||||
|
fuchsia_sdk_readelf_exec =
|
||||||
|
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
|
||||||
|
fuchsia_target_api_level = 9
|
||||||
|
|
||||||
|
devtools_visibility = [ "*" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
# These are the targets to skip header checking by default. The files in targets
|
||||||
|
# matching these patterns (see "gn help label_pattern" for format) will not have
|
||||||
|
# their includes checked for proper dependencies when you run either
|
||||||
|
# "gn check" or "gn gen --check".
|
||||||
|
no_check_targets = [
|
||||||
|
"//headless:headless_non_renderer", # 2 errors
|
||||||
|
"//headless:headless_renderer", # 12 errors
|
||||||
|
"//headless:headless_shared_sources", # 2 errors
|
||||||
|
"//headless:headless_shell_browser_lib", # 1 errors
|
||||||
|
|
||||||
|
# //v8, https://crbug.com/v8/7330
|
||||||
|
"//v8/src/inspector:inspector", # 20 errors
|
||||||
|
"//v8/test/cctest:cctest_sources", # 2 errors
|
||||||
|
"//v8:cppgc_base", # 1 error
|
||||||
|
"//v8:v8_internal_headers", # 11 errors
|
||||||
|
"//v8:v8_libplatform", # 2 errors
|
||||||
|
|
||||||
|
# After making partition_alloc a standalone library, remove partition_alloc
|
||||||
|
# target from the skip list, because partition_aloc will depend on its own
|
||||||
|
# base.
|
||||||
|
# partition alloc standalone library bug is https://crbug.com/1151236.
|
||||||
|
"//base/allocator/partition_allocator:partition_alloc", # 292 errors
|
||||||
|
]
|
||||||
|
|
||||||
|
# These are the list of GN files that run exec_script. This whitelist exists
|
||||||
|
# to force additional review for new uses of exec_script, which is strongly
|
||||||
|
# discouraged.
|
||||||
|
#
|
||||||
|
# PLEASE READ
|
||||||
|
#
|
||||||
|
# You should almost never need to add new exec_script calls. exec_script is
|
||||||
|
# slow, especially on Windows, and can cause confusing effects. Although
|
||||||
|
# individually each call isn't slow or necessarily very confusing, at the scale
|
||||||
|
# of our repo things get out of hand quickly. By strongly pushing back on all
|
||||||
|
# additions, we keep the build fast and clean. If you think you need to add a
|
||||||
|
# new call, please consider:
|
||||||
|
#
|
||||||
|
# - Do not use a script to check for the existence of a file or directory to
|
||||||
|
# enable a different mode. Instead, use GN build args to enable or disable
|
||||||
|
# functionality and set options. An example is checking for a file in the
|
||||||
|
# src-internal repo to see if the corresponding src-internal feature should
|
||||||
|
# be enabled. There are several things that can go wrong with this:
|
||||||
|
#
|
||||||
|
# - It's mysterious what causes some things to happen. Although in many cases
|
||||||
|
# such behavior can be conveniently automatic, GN optimizes for explicit
|
||||||
|
# and obvious behavior so people can more easily diagnose problems.
|
||||||
|
#
|
||||||
|
# - The user can't enable a mode for one build and not another. With GN build
|
||||||
|
# args, the user can choose the exact configuration of multiple builds
|
||||||
|
# using one checkout. But implicitly basing flags on the state of the
|
||||||
|
# checkout, this functionality is broken.
|
||||||
|
#
|
||||||
|
# - It's easy to get stale files. If for example the user edits the gclient
|
||||||
|
# to stop checking out src-internal (or any other optional thing), it's
|
||||||
|
# easy to end up with stale files still mysteriously triggering build
|
||||||
|
# conditions that are no longer appropriate (yes, this happens in real
|
||||||
|
# life).
|
||||||
|
#
|
||||||
|
# - Do not use a script to iterate files in a directory (glob):
|
||||||
|
#
|
||||||
|
# - This has the same "stale file" problem as the above discussion. Various
|
||||||
|
# operations can leave untracked files in the source tree which can cause
|
||||||
|
# surprising effects.
|
||||||
|
#
|
||||||
|
# - It becomes impossible to use "git grep" to find where a certain file is
|
||||||
|
# referenced. This operation is very common and people really do get
|
||||||
|
# confused when things aren't listed.
|
||||||
|
#
|
||||||
|
# - It's easy to screw up. One common case is a build-time script that packs
|
||||||
|
# up a directory. The author notices that the script isn't re-run when the
|
||||||
|
# directory is updated, so adds a glob so all the files are listed as
|
||||||
|
# inputs. This seems to work great... until a file is deleted. When a
|
||||||
|
# file is deleted, all the inputs the glob lists will still be up to date
|
||||||
|
# and no command-lines will have been changed. The action will not be
|
||||||
|
# re-run and the build will be broken. It is possible to get this correct
|
||||||
|
# using glob, and it's possible to mess it up without glob, but globs make
|
||||||
|
# this situation much easier to create. if the build always lists the
|
||||||
|
# files and passes them to a script, it will always be correct.
|
||||||
|
|
||||||
|
exec_script_whitelist =
|
||||||
|
build_dotfile_settings.exec_script_whitelist +
|
||||||
|
angle_dotfile_settings.exec_script_whitelist +
|
||||||
|
[
|
||||||
|
# Whitelist entries for //build should go into
|
||||||
|
# //build/dotfile_settings.gni instead, so that they can be shared
|
||||||
|
# with other repos. The entries in this list should be only for files
|
||||||
|
# in the Chromium repo outside of //build.
|
||||||
|
"//build_overrides/build.gni",
|
||||||
|
|
||||||
|
"//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
|
||||||
|
"//chrome/version.gni",
|
||||||
|
|
||||||
|
# TODO(dgn): Layer violation but breaks the build otherwise, see
|
||||||
|
# https://crbug.com/474506.
|
||||||
|
"//clank/java/BUILD.gn",
|
||||||
|
"//clank/native/BUILD.gn",
|
||||||
|
|
||||||
|
"//google_apis/BUILD.gn",
|
||||||
|
"//printing/BUILD.gn",
|
||||||
|
|
||||||
|
"//remoting/host/installer/linux/BUILD.gn",
|
||||||
|
"//remoting/remoting_version.gni",
|
||||||
|
"//remoting/host/installer/win/generate_clsids.gni",
|
||||||
|
|
||||||
|
"//tools/grit/grit_rule.gni",
|
||||||
|
"//tools/gritsettings/BUILD.gn",
|
||||||
|
]
|
1454
src/AUTHORS
Normal file
1454
src/AUTHORS
Normal file
File diff suppressed because it is too large
Load Diff
1779
src/BUILD.gn
Normal file
1779
src/BUILD.gn
Normal file
File diff suppressed because it is too large
Load Diff
27
src/LICENSE
Normal file
27
src/LICENSE
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
// Copyright 2015 The Chromium Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
4762
src/base/BUILD.gn
Normal file
4762
src/base/BUILD.gn
Normal file
File diff suppressed because it is too large
Load Diff
28
src/base/DEPS
Normal file
28
src/base/DEPS
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
include_rules = [
|
||||||
|
"+third_party/ashmem",
|
||||||
|
"+third_party/apple_apsl",
|
||||||
|
"+third_party/boringssl/src/include",
|
||||||
|
"+third_party/ced",
|
||||||
|
"+third_party/libevent",
|
||||||
|
"+third_party/libunwindstack/src/libunwindstack/include",
|
||||||
|
"+third_party/lss",
|
||||||
|
"+third_party/modp_b64",
|
||||||
|
"+third_party/perfetto/include",
|
||||||
|
"+third_party/perfetto/protos/perfetto",
|
||||||
|
# Conversions between base and Rust types (e.g. base::span <-> rust::Slice)
|
||||||
|
# require the cxx.h header from cxx. This is only used if Rust is enabled
|
||||||
|
# in the gn build; see //base/BUILD.gn's conditional dependency on
|
||||||
|
# //build/rust:cxx_cppdeps.
|
||||||
|
"+third_party/rust/cxx",
|
||||||
|
"+third_party/test_fonts",
|
||||||
|
|
||||||
|
# These are implicitly brought in from the root, and we don't want them.
|
||||||
|
"-ipc",
|
||||||
|
"-url",
|
||||||
|
|
||||||
|
# ICU dependendencies must be separate from the rest of base.
|
||||||
|
"-i18n",
|
||||||
|
|
||||||
|
# //base/util can use //base but not vice versa.
|
||||||
|
"-util",
|
||||||
|
]
|
3
src/base/DIR_METADATA
Normal file
3
src/base/DIR_METADATA
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
monorail {
|
||||||
|
component: "Internals>Core"
|
||||||
|
}
|
40
src/base/OWNERS
Normal file
40
src/base/OWNERS
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
# See //base/README.md to find qualification for being an owner.
|
||||||
|
|
||||||
|
set noparent
|
||||||
|
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
|
||||||
|
# by emailing lsc-policy@chromium.org when this list changes.
|
||||||
|
danakj@chromium.org
|
||||||
|
dcheng@chromium.org
|
||||||
|
fdoray@chromium.org
|
||||||
|
gab@chromium.org
|
||||||
|
kylechar@chromium.org
|
||||||
|
mark@chromium.org
|
||||||
|
thakis@chromium.org
|
||||||
|
thestig@chromium.org
|
||||||
|
wez@chromium.org
|
||||||
|
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
|
||||||
|
# by emailing lsc-policy@chromium.org when this list changes.
|
||||||
|
|
||||||
|
# per-file rules:
|
||||||
|
# These are for the common case of adding or renaming files. If you're doing
|
||||||
|
# structural changes, please get a review from a reviewer in this file.
|
||||||
|
per-file BUILD.gn=*
|
||||||
|
|
||||||
|
# For Android-specific changes:
|
||||||
|
per-file ..._android*=file://base/android/OWNERS
|
||||||
|
|
||||||
|
# For Fuchsia-specific changes:
|
||||||
|
per-file ..._fuchsia*=file://build/fuchsia/OWNERS
|
||||||
|
|
||||||
|
# For Windows-specific changes:
|
||||||
|
per-file ..._win*=file://base/win/OWNERS
|
||||||
|
|
||||||
|
per-file callback_list*=pkasting@chromium.org
|
||||||
|
per-file feature_list*=asvitkine@chromium.org
|
||||||
|
per-file feature_list*=isherman@chromium.org
|
||||||
|
|
||||||
|
# Restricted since rand_util.h also backs the cryptographically secure RNG.
|
||||||
|
per-file rand_util*=set noparent
|
||||||
|
per-file rand_util*=file://ipc/SECURITY_OWNERS
|
||||||
|
|
||||||
|
per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS
|
163
src/base/PRESUBMIT.py
Normal file
163
src/base/PRESUBMIT.py
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
|
# found in the LICENSE file.
|
||||||
|
|
||||||
|
"""Chromium presubmit script for src/base.
|
||||||
|
|
||||||
|
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
|
||||||
|
for more details on the presubmit API built into depot_tools.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
USE_PYTHON3 = True
|
||||||
|
|
||||||
|
|
||||||
|
def CheckChangeLintsClean(input_api, output_api):
|
||||||
|
"""Makes sure that the code is cpplint clean."""
|
||||||
|
# lint_filters=[] stops the OFF_BY_DEFAULT_LINT_FILTERS from being disabled,
|
||||||
|
# finding many more issues. verbose_level=1 finds a small number of additional
|
||||||
|
# issues.
|
||||||
|
# The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
|
||||||
|
# Only process those extensions which are used in Chromium, in directories
|
||||||
|
# that currently lint clean.
|
||||||
|
CLEAN_CPP_FILES_ONLY = (r'base[\\/]win[\\/].*\.(cc|h)$', )
|
||||||
|
source_file_filter = lambda x: input_api.FilterSourceFile(
|
||||||
|
x,
|
||||||
|
files_to_check=CLEAN_CPP_FILES_ONLY,
|
||||||
|
files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
|
||||||
|
return input_api.canned_checks.CheckChangeLintsClean(
|
||||||
|
input_api, output_api, source_file_filter=source_file_filter,
|
||||||
|
lint_filters=[], verbose_level=1)
|
||||||
|
|
||||||
|
|
||||||
|
def _CheckNoInterfacesInBase(input_api, output_api):
|
||||||
|
"""Checks to make sure no files in libbase.a have |@interface|."""
|
||||||
|
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
|
||||||
|
files = []
|
||||||
|
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
|
||||||
|
if (f.LocalPath().startswith('base/') and
|
||||||
|
not "/ios/" in f.LocalPath() and
|
||||||
|
not "/test/" in f.LocalPath() and
|
||||||
|
not f.LocalPath().endswith('.java') and
|
||||||
|
not f.LocalPath().endswith('_unittest.mm') and
|
||||||
|
not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
|
||||||
|
contents = input_api.ReadFile(f)
|
||||||
|
if pattern.search(contents):
|
||||||
|
files.append(f)
|
||||||
|
|
||||||
|
if len(files):
|
||||||
|
return [ output_api.PresubmitError(
|
||||||
|
'Objective-C interfaces or categories are forbidden in libbase. ' +
|
||||||
|
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
|
||||||
|
'browse_thread/thread/efb28c10435987fd',
|
||||||
|
files) ]
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
|
||||||
|
"""Returns locations matching one of the search_regexes."""
|
||||||
|
def FilterFile(affected_file):
|
||||||
|
return input_api.FilterSourceFile(
|
||||||
|
affected_file,
|
||||||
|
files_to_check=files_to_check,
|
||||||
|
files_to_skip=files_to_skip)
|
||||||
|
|
||||||
|
no_presubmit = r"// no-presubmit-check"
|
||||||
|
locations = []
|
||||||
|
for f in input_api.AffectedSourceFiles(FilterFile):
|
||||||
|
for line_num, line in f.ChangedContents():
|
||||||
|
for search_regex in search_regexes:
|
||||||
|
if (input_api.re.search(search_regex, line) and
|
||||||
|
not input_api.re.search(no_presubmit, line)):
|
||||||
|
locations.append(" %s:%d" % (f.LocalPath(), line_num))
|
||||||
|
break
|
||||||
|
return locations
|
||||||
|
|
||||||
|
|
||||||
|
def _CheckNoTraceEventInclude(input_api, output_api):
|
||||||
|
"""Verify that //base includes base_tracing.h instead of trace event headers.
|
||||||
|
|
||||||
|
Checks that files outside trace event implementation include the
|
||||||
|
base_tracing.h header instead of specific trace event implementation headers
|
||||||
|
to maintain compatibility with the gn flag "enable_base_tracing = false".
|
||||||
|
"""
|
||||||
|
discouraged_includes = [
|
||||||
|
r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
|
||||||
|
r'^#include "third_party/perfetto/include/',
|
||||||
|
]
|
||||||
|
|
||||||
|
files_to_check = [
|
||||||
|
r".*\.(h|cc|mm)$",
|
||||||
|
]
|
||||||
|
files_to_skip = [
|
||||||
|
r".*[\\/]test[\\/].*",
|
||||||
|
r".*[\\/]trace_event[\\/].*",
|
||||||
|
r".*[\\/]tracing[\\/].*",
|
||||||
|
]
|
||||||
|
|
||||||
|
locations = _FindLocations(input_api, discouraged_includes, files_to_check,
|
||||||
|
files_to_skip)
|
||||||
|
if locations:
|
||||||
|
return [ output_api.PresubmitError(
|
||||||
|
'Base code should include "base/trace_event/base_tracing.h" instead\n' +
|
||||||
|
'of trace_event implementation headers. If you need to include an\n' +
|
||||||
|
'implementation header, verify that "gn check" and base_unittests\n' +
|
||||||
|
'still pass with gn arg "enable_base_tracing = false" and add\n' +
|
||||||
|
'"// no-presubmit-check" after the include. \n' +
|
||||||
|
'\n'.join(locations)) ]
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def _WarnPbzeroIncludes(input_api, output_api):
|
||||||
|
"""Warn to check enable_base_tracing=false when including a pbzero header.
|
||||||
|
|
||||||
|
Emits a warning when including a perfetto pbzero header, encouraging the
|
||||||
|
user to verify that //base still builds with enable_base_tracing=false.
|
||||||
|
"""
|
||||||
|
warn_includes = [
|
||||||
|
r'^#include "third_party/perfetto/protos/',
|
||||||
|
r'^#include "base/tracing/protos/',
|
||||||
|
]
|
||||||
|
|
||||||
|
files_to_check = [
|
||||||
|
r".*\.(h|cc|mm)$",
|
||||||
|
]
|
||||||
|
files_to_skip = [
|
||||||
|
r".*[\\/]test[\\/].*",
|
||||||
|
r".*[\\/]trace_event[\\/].*",
|
||||||
|
r".*[\\/]tracing[\\/].*",
|
||||||
|
]
|
||||||
|
|
||||||
|
locations = _FindLocations(input_api, warn_includes, files_to_check,
|
||||||
|
files_to_skip)
|
||||||
|
if locations:
|
||||||
|
return [ output_api.PresubmitPromptWarning(
|
||||||
|
'Please verify that "gn check" and base_unittests still pass with\n' +
|
||||||
|
'gn arg "enable_base_tracing = false" when adding typed trace\n' +
|
||||||
|
'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
|
||||||
|
'to exclude pbzero headers and anything not supported by\n' +
|
||||||
|
'//base/trace_event/trace_event_stub.h.\n' +
|
||||||
|
'\n'.join(locations)) ]
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def _CommonChecks(input_api, output_api):
|
||||||
|
"""Checks common to both upload and commit."""
|
||||||
|
results = []
|
||||||
|
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
|
||||||
|
results.extend(_CheckNoTraceEventInclude(input_api, output_api))
|
||||||
|
results.extend(_WarnPbzeroIncludes(input_api, output_api))
|
||||||
|
results.extend(CheckChangeLintsClean(input_api, output_api))
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def CheckChangeOnUpload(input_api, output_api):
|
||||||
|
results = []
|
||||||
|
results.extend(_CommonChecks(input_api, output_api))
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def CheckChangeOnCommit(input_api, output_api):
|
||||||
|
results = []
|
||||||
|
results.extend(_CommonChecks(input_api, output_api))
|
||||||
|
return results
|
81
src/base/README.md
Normal file
81
src/base/README.md
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
# What is this
|
||||||
|
Contains a written down set of principles and other information on //base.
|
||||||
|
Please add to it!
|
||||||
|
|
||||||
|
## About //base:
|
||||||
|
|
||||||
|
Chromium is a very mature project. Most things that are generally useful are
|
||||||
|
already here and things not here aren't generally useful.
|
||||||
|
|
||||||
|
The bar for adding stuff to base is that it must have demonstrated wide
|
||||||
|
applicability. Prefer to add things closer to where they're used (i.e. "not
|
||||||
|
base"), and pull into base only when needed. In a project our size,
|
||||||
|
sometimes even duplication is OK and inevitable.
|
||||||
|
|
||||||
|
Adding a new logging macro `DPVELOG_NE` is not more clear than just
|
||||||
|
writing the stuff you want to log in a regular logging statement, even
|
||||||
|
if it makes your calling code longer. Just add it to your own code.
|
||||||
|
|
||||||
|
If the code in question does not need to be used inside base, but will have
|
||||||
|
multiple consumers across the codebase, consider placing it in a new directory
|
||||||
|
under components/ instead.
|
||||||
|
|
||||||
|
base is written for the Chromium project and is not intended to be used
|
||||||
|
outside it. Using base outside of src.git is explicitly not supported,
|
||||||
|
and base makes no guarantees about API (or even ABI) stability (like all
|
||||||
|
other code in Chromium). New code that depends on base/ must be in
|
||||||
|
src.git. Code that's not in src.git but pulled in through DEPS (for
|
||||||
|
example, v8) cannot use base.
|
||||||
|
|
||||||
|
## Qualifications for being in //base OWNERS
|
||||||
|
* interest and ability to learn low level/high detail/complex c++ stuff
|
||||||
|
* inclination to always ask why and understand everything (including external
|
||||||
|
interactions like win32) rather than just hoping the author did it right
|
||||||
|
* mentorship/experience
|
||||||
|
* demonstrated good judgement (esp with regards to public APIs) over a length
|
||||||
|
of time
|
||||||
|
|
||||||
|
Owners are added when a contributor has shown the above qualifications and
|
||||||
|
when they express interest. There isn't an upper bound on the number of OWNERS.
|
||||||
|
|
||||||
|
## Design and naming
|
||||||
|
* Be sure to use the base namespace.
|
||||||
|
* STL-like constructs should adhere as closely to STL as possible. Functions
|
||||||
|
and behaviors not present in STL should only be added when they are related
|
||||||
|
to the specific data structure implemented by the container.
|
||||||
|
* For STL-like constructs our policy is that they should use STL-like naming
|
||||||
|
even when it may conflict with the style guide. So functions and class names
|
||||||
|
should be lower case with underscores. Non-STL-like classes and functions
|
||||||
|
should use Google naming.
|
||||||
|
|
||||||
|
## Performance testing
|
||||||
|
|
||||||
|
Since the primitives provided by //base are used very widely, it is important to
|
||||||
|
ensure they scale to the necessary workloads and perform well under all
|
||||||
|
supported platforms. The `base_perftests` target is a suite of
|
||||||
|
synthetic microbenchmarks that measure performance in various scenarios:
|
||||||
|
|
||||||
|
* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
|
||||||
|
queue in isolation.
|
||||||
|
* ConditionVariablePerfTest: Measures thread switching cost of condition
|
||||||
|
variables.
|
||||||
|
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
|
||||||
|
machinery.
|
||||||
|
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
|
||||||
|
* MessageLoopPerfTest: Measures the speed of task posting in various
|
||||||
|
configurations.
|
||||||
|
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
|
||||||
|
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
|
||||||
|
pthreads.
|
||||||
|
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
|
||||||
|
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
|
||||||
|
underlying task runners.
|
||||||
|
* TaskObserverPerfTest: Measures the incremental cost of adding task
|
||||||
|
observers.
|
||||||
|
* TaskPerfTest: Checks the cost of posting tasks between threads.
|
||||||
|
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
|
||||||
|
multithreaded scenarios.
|
||||||
|
|
||||||
|
Regressions in these benchmarks can generally by caused by 1) operating system
|
||||||
|
changes, 2) compiler version or flag changes or 3) changes in //base code
|
||||||
|
itself.
|
13
src/base/SECURITY_OWNERS
Normal file
13
src/base/SECURITY_OWNERS
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# Changes to code that runs at high privilege and which has a high risk of
|
||||||
|
# memory corruption, such as parsers for complex inputs, require a security
|
||||||
|
# review to avoid introducing sandbox escapes.
|
||||||
|
#
|
||||||
|
# Although this file is in base/, it may apply to more than just base, OWNERS
|
||||||
|
# files outside of base may also include this file.
|
||||||
|
#
|
||||||
|
# Security team: If you are uncomfortable reviewing a particular bit of code
|
||||||
|
# yourself, don't hesitate to seek help from another security team member!
|
||||||
|
# Nobody knows everything, and the only way to learn is from experience.
|
||||||
|
dcheng@chromium.org
|
||||||
|
rsesek@chromium.org
|
||||||
|
tsepez@chromium.org
|
97
src/base/allocator/BUILD.gn
Normal file
97
src/base/allocator/BUILD.gn
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
|
# found in the LICENSE file.
|
||||||
|
|
||||||
|
import("//base/allocator/allocator.gni")
|
||||||
|
import("//build/buildflag_header.gni")
|
||||||
|
import("//build/config/compiler/compiler.gni")
|
||||||
|
import("//build/config/dcheck_always_on.gni")
|
||||||
|
|
||||||
|
buildflag_header("buildflags") {
|
||||||
|
header = "buildflags.h"
|
||||||
|
_use_partition_alloc_as_malloc = use_allocator == "partition"
|
||||||
|
assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
|
||||||
|
"Partition alloc requires the allocator shim")
|
||||||
|
|
||||||
|
# BackupRefPtr(BRP) build flags.
|
||||||
|
_use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
|
||||||
|
_put_ref_count_in_previous_slot =
|
||||||
|
put_ref_count_in_previous_slot && _use_backup_ref_ptr
|
||||||
|
_enable_backup_ref_ptr_slow_checks =
|
||||||
|
enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
|
||||||
|
_enable_dangling_raw_ptr_checks =
|
||||||
|
enable_dangling_raw_ptr_checks && _use_backup_ref_ptr
|
||||||
|
|
||||||
|
# MTECheckedPtr is exclusive against BRP (asserted at declaration).
|
||||||
|
# MTECheckedPtr requires 64-bit pointers (not available in NaCl).
|
||||||
|
_use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl
|
||||||
|
|
||||||
|
flags = [
|
||||||
|
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
|
||||||
|
"USE_PARTITION_ALLOC=$use_partition_alloc",
|
||||||
|
"USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",
|
||||||
|
|
||||||
|
"USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
|
||||||
|
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
|
||||||
|
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
|
||||||
|
"ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
|
||||||
|
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
|
||||||
|
|
||||||
|
# Not to be used directly - see `partition_alloc_config.h`.
|
||||||
|
"USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_apple) {
|
||||||
|
source_set("early_zone_registration_mac") {
|
||||||
|
sources = [
|
||||||
|
"early_zone_registration_mac.cc",
|
||||||
|
"early_zone_registration_mac.h",
|
||||||
|
]
|
||||||
|
|
||||||
|
deps = [
|
||||||
|
":buildflags",
|
||||||
|
"//base/allocator/partition_allocator:buildflags",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Used to shim malloc symbols on Android. see //base/allocator/README.md.
|
||||||
|
config("wrap_malloc_symbols") {
|
||||||
|
ldflags = [
|
||||||
|
"-Wl,-wrap,calloc",
|
||||||
|
"-Wl,-wrap,free",
|
||||||
|
"-Wl,-wrap,malloc",
|
||||||
|
"-Wl,-wrap,memalign",
|
||||||
|
"-Wl,-wrap,posix_memalign",
|
||||||
|
"-Wl,-wrap,pvalloc",
|
||||||
|
"-Wl,-wrap,realloc",
|
||||||
|
"-Wl,-wrap,valloc",
|
||||||
|
|
||||||
|
# Not allocating memory, but part of the API
|
||||||
|
"-Wl,-wrap,malloc_usable_size",
|
||||||
|
|
||||||
|
# <stdlib.h> functions
|
||||||
|
"-Wl,-wrap,realpath",
|
||||||
|
|
||||||
|
# <string.h> functions
|
||||||
|
"-Wl,-wrap,strdup",
|
||||||
|
"-Wl,-wrap,strndup",
|
||||||
|
|
||||||
|
# <unistd.h> functions
|
||||||
|
"-Wl,-wrap,getcwd",
|
||||||
|
|
||||||
|
# <stdio.h> functions
|
||||||
|
"-Wl,-wrap,asprintf",
|
||||||
|
"-Wl,-wrap,vasprintf",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
config("mac_no_default_new_delete_symbols") {
|
||||||
|
if (!is_component_build) {
|
||||||
|
# This is already set when we compile libc++, see
|
||||||
|
# buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as well,
|
||||||
|
# since the shim defines the symbols, to prevent them being exported.
|
||||||
|
cflags = [ "-fvisibility-global-new-delete-hidden" ]
|
||||||
|
}
|
||||||
|
}
|
3
src/base/allocator/DIR_METADATA
Normal file
3
src/base/allocator/DIR_METADATA
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
monorail {
|
||||||
|
component: "Internals"
|
||||||
|
}
|
8
src/base/allocator/OWNERS
Normal file
8
src/base/allocator/OWNERS
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
lizeb@chromium.org
|
||||||
|
primiano@chromium.org
|
||||||
|
wfh@chromium.org
|
||||||
|
|
||||||
|
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
|
||||||
|
per-file allocator_shim_default_dispatch_to_partition_alloc*=file://base/allocator/partition_allocator/OWNERS
|
||||||
|
per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
|
||||||
|
per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS
|
172
src/base/allocator/README.md
Normal file
172
src/base/allocator/README.md
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
This document describes how malloc / new calls are routed in the various Chrome
|
||||||
|
platforms.
|
||||||
|
|
||||||
|
Bare in mind that the chromium codebase does not always just use `malloc()`.
|
||||||
|
Some examples:
|
||||||
|
- Large parts of the renderer (Blink) use two home-brewed allocators,
|
||||||
|
PartitionAlloc and BlinkGC (Oilpan).
|
||||||
|
- Some subsystems, such as the V8 JavaScript engine, handle memory management
|
||||||
|
autonomously.
|
||||||
|
- Various parts of the codebase use abstractions such as `SharedMemory` or
|
||||||
|
`DiscardableMemory` which, similarly to the above, have their own page-level
|
||||||
|
memory management.
|
||||||
|
|
||||||
|
Background
|
||||||
|
----------
|
||||||
|
The `allocator` target defines at compile-time the platform-specific choice of
|
||||||
|
the allocator and extra-hooks which services calls to malloc/new. The relevant
|
||||||
|
build-time flags involved are `use_allocator` and `use_allocator_shim`.
|
||||||
|
|
||||||
|
The default choices are as follows:
|
||||||
|
|
||||||
|
**Windows**
|
||||||
|
`use_allocator: winheap`, the default Windows heap.
|
||||||
|
Additionally, `static_library` (i.e. non-component) builds have a shim
|
||||||
|
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
|
||||||
|
The shim layer provides extra security features, such as preventing large
|
||||||
|
allocations that can hit signed vs. unsigned bugs in third_party code.
|
||||||
|
|
||||||
|
**Android**
|
||||||
|
`use_allocator: none`, always use the allocator symbols coming from Android's
|
||||||
|
libc (Bionic). As it is developed as part of the OS, it is considered to be
|
||||||
|
optimized for small devices and more memory-efficient than other choices.
|
||||||
|
The actual implementation backing malloc symbols in Bionic is up to the board
|
||||||
|
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
|
||||||
|
|
||||||
|
**Mac/iOS**
|
||||||
|
`use_allocator: none`, we always use the system's allocator implementation.
|
||||||
|
|
||||||
|
In addition, when building for `asan` / `msan` both the allocator and the shim
|
||||||
|
layer are disabled.
|
||||||
|
|
||||||
|
|
||||||
|
Layering and build deps
|
||||||
|
-----------------------
|
||||||
|
The `allocator` target provides the linker flags required for the Windows shim
|
||||||
|
layer. The `base` target is (almost) the only one depending on `allocator`. No
|
||||||
|
other targets should depend on it, with the exception of the very few
|
||||||
|
executables / dynamic libraries that don't depend, either directly or
|
||||||
|
indirectly, on `base` within the scope of a linker unit.
|
||||||
|
|
||||||
|
More importantly, **no other place outside of `/base` should depend on the
|
||||||
|
specific allocator**.
|
||||||
|
If such a functional dependency is required that should be achieved using
|
||||||
|
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
|
||||||
|
`/base/memory/`)
|
||||||
|
|
||||||
|
**Why `base` depends on `allocator`?**
|
||||||
|
Because it needs to provide services that depend on the actual allocator
|
||||||
|
implementation. In the past `base` used to pretend to be allocator-agnostic
|
||||||
|
and get the dependencies injected by other layers. This ended up being an
|
||||||
|
inconsistent mess.
|
||||||
|
See the [allocator cleanup doc][url-allocator-cleanup] for more context.
|
||||||
|
|
||||||
|
Linker unit targets (executables and shared libraries) that depend in some way
|
||||||
|
on `base` (most of the targets in the codebase) automatically get the correct
|
||||||
|
set of linker flags to pull in the Windows shim-layer (if needed).
|
||||||
|
|
||||||
|
|
||||||
|
Source code
|
||||||
|
-----------
|
||||||
|
This directory contains just the allocator (i.e. shim) layer that switches
|
||||||
|
between the different underlying memory allocation implementations.
|
||||||
|
|
||||||
|
|
||||||
|
Unified allocator shim
|
||||||
|
----------------------
|
||||||
|
On most platforms, Chrome overrides the malloc / operator new symbols (and
|
||||||
|
corresponding free / delete and other variants). This is to enforce security
|
||||||
|
checks and lately to enable the
|
||||||
|
[memory-infra heap profiler][url-memory-infra-heap-profiler].
|
||||||
|
Historically each platform had its special logic for defining the allocator
|
||||||
|
symbols in different places of the codebase. The unified allocator shim is
|
||||||
|
a project aimed to unify the symbol definition and allocator routing logic in
|
||||||
|
a central place.
|
||||||
|
|
||||||
|
- Full documentation: [Allocator shim design doc][url-allocator-shim].
|
||||||
|
- Current state: Available and enabled by default on Android, CrOS, Linux,
|
||||||
|
Mac OS and Windows.
|
||||||
|
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
|
||||||
|
- Build-time flag: `use_allocator_shim`.
|
||||||
|
|
||||||
|
**Overview of the unified allocator shim**
|
||||||
|
The allocator shim consists of three stages:
|
||||||
|
```
|
||||||
|
+-------------------------+ +-----------------------+ +----------------+
|
||||||
|
| malloc & friends | -> | shim layer | -> | Routing to |
|
||||||
|
| symbols definition | | implementation | | allocator |
|
||||||
|
+-------------------------+ +-----------------------+ +----------------+
|
||||||
|
| - libc symbols (malloc, | | - Security checks | | - glibc |
|
||||||
|
| calloc, free, ...) | | - Chain of dispatchers| | - Android |
|
||||||
|
| - C++ symbols (operator | | that can intercept | | bionic |
|
||||||
|
| new, delete, ...) | | and override | | - WinHeap |
|
||||||
|
| - glibc weak symbols | | allocations | | - Partition |
|
||||||
|
| (__libc_malloc, ...) | +-----------------------+ | Alloc |
|
||||||
|
+-------------------------+ +----------------+
|
||||||
|
```
|
||||||
|
|
||||||
|
**1. malloc symbols definition**
|
||||||
|
This stage takes care of overriding the symbols `malloc`, `free`,
|
||||||
|
`operator new`, `operator delete` and friends and routing those calls inside the
|
||||||
|
allocator shim (next point).
|
||||||
|
This is taken care of by the headers in `allocator_shim_override_*`.
|
||||||
|
|
||||||
|
*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols, that we
|
||||||
|
can override in `allocator_shim_override_ucr_symbols_win.h`.
|
||||||
|
|
||||||
|
*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
|
||||||
|
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
|
||||||
|
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
|
||||||
|
`operator delete` and friends).
|
||||||
|
This enables proper interposition of malloc symbols referenced by the main
|
||||||
|
executable and any third party libraries. Symbol resolution on Linux is a breadth first search that starts from the root link unit, that is the executable
|
||||||
|
(see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats Specification).
|
||||||
|
The Linux/CrOS shim was introduced by
|
||||||
|
[crrev.com/1675143004](https://crrev.com/1675143004).
|
||||||
|
|
||||||
|
*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
|
||||||
|
possible. This is because Android processes are `fork()`-ed from the Android
|
||||||
|
zygote, which pre-loads libc.so and only later native code gets loaded via
|
||||||
|
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
|
||||||
|
scope).
|
||||||
|
In this case, the approach instead of wrapping symbol resolution at link time
|
||||||
|
(i.e. during the build), via the `--Wl,-wrap,malloc` linker flag.
|
||||||
|
The use of this wrapping flag causes:
|
||||||
|
- All references to allocator symbols in the Chrome codebase to be rewritten as
|
||||||
|
references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
|
||||||
|
defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
|
||||||
|
route allocator calls inside the shim layer.
|
||||||
|
- The reference to the original `malloc` symbols (which typically is defined by
|
||||||
|
the system's libc.so) are accessible via the special `__real_malloc` and
|
||||||
|
friends symbols (which will be relocated, at load time, against `malloc`).
|
||||||
|
|
||||||
|
In summary, this approach is transparent to the dynamic loader, which still sees
|
||||||
|
undefined symbol references to malloc symbols.
|
||||||
|
These symbols will be resolved against libc.so as usual.
|
||||||
|
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
|
||||||
|
|
||||||
|
**2. Shim layer implementation**
|
||||||
|
This stage contains the actual shim implementation. This consists of:
|
||||||
|
- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
|
||||||
|
(using the `InsertAllocatorDispatch` API). They can intercept and override
|
||||||
|
allocator calls.
|
||||||
|
- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
|
||||||
|
This happens inside `allocator_shim.cc`
|
||||||
|
|
||||||
|
**3. Final allocator routing**
|
||||||
|
The final element of the aforementioned dispatcher chain is statically defined
|
||||||
|
at build time and ultimately routes the allocator calls to the actual allocator
|
||||||
|
(as described in the *Background* section above). This is taken care of by the
|
||||||
|
headers in `allocator_shim_default_dispatch_to_*` files.
|
||||||
|
|
||||||
|
|
||||||
|
Related links
|
||||||
|
-------------
|
||||||
|
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
|
||||||
|
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
|
||||||
|
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
|
||||||
|
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
|
||||||
|
|
||||||
|
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
|
||||||
|
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
|
||||||
|
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
|
157
src/base/allocator/allocator.gni
Normal file
157
src/base/allocator/allocator.gni
Normal file
@ -0,0 +1,157 @@
|
|||||||
|
# Copyright 2019 The Chromium Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
|
# found in the LICENSE file.
|
||||||
|
|
||||||
|
import("//build/config/chromecast_build.gni")
|
||||||
|
import("//build/config/sanitizers/sanitizers.gni")
|
||||||
|
|
||||||
|
if (is_ios) {
|
||||||
|
import("//build/config/ios/ios_sdk.gni")
|
||||||
|
}
|
||||||
|
|
||||||
|
# Sanitizers replace the allocator, don't use our own.
|
||||||
|
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan
|
||||||
|
|
||||||
|
# - Component build support is disabled on all platforms. It is known to cause
|
||||||
|
# issues on some (e.g. Windows with shims, Android with non-universal symbol
|
||||||
|
# wrapping), and has not been validated on others.
|
||||||
|
# - Windows: debug CRT is not compatible, see below.
|
||||||
|
_disable_partition_alloc = is_component_build || (is_win && is_debug)
|
||||||
|
|
||||||
|
# - NaCl: No plans to support it.
|
||||||
|
# - iOS: not done yet.
|
||||||
|
_is_partition_alloc_platform = !is_nacl && !is_ios
|
||||||
|
|
||||||
|
# Under Windows Debug the allocator shim is not compatible with CRT.
|
||||||
|
# NaCl in particular does seem to link some binaries statically
|
||||||
|
# against the debug CRT with "is_nacl=false".
|
||||||
|
# Under Fuchsia the allocator shim is only required for PA-E.
|
||||||
|
# For all other platforms & configurations, the shim is required, to replace
|
||||||
|
# the default system allocators, e.g. with Partition Alloc.
|
||||||
|
if ((is_linux || is_chromeos || is_android || is_apple ||
|
||||||
|
(is_fuchsia && !_disable_partition_alloc) ||
|
||||||
|
(is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
|
||||||
|
_default_use_allocator_shim = true
|
||||||
|
} else {
|
||||||
|
_default_use_allocator_shim = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if (_default_use_allocator_shim && _is_partition_alloc_platform &&
|
||||||
|
!_disable_partition_alloc) {
|
||||||
|
_default_allocator = "partition"
|
||||||
|
} else {
|
||||||
|
_default_allocator = "none"
|
||||||
|
}
|
||||||
|
|
||||||
|
declare_args() {
|
||||||
|
# Memory allocator to use. Set to "none" to use default allocator.
|
||||||
|
use_allocator = _default_allocator
|
||||||
|
|
||||||
|
# Causes all the allocations to be routed via allocator_shim.cc.
|
||||||
|
use_allocator_shim = _default_use_allocator_shim
|
||||||
|
|
||||||
|
# Whether PartitionAlloc should be available for use or not.
|
||||||
|
# true makes PartitionAlloc linked to the executable or shared library and
|
||||||
|
# makes it available for use. It doesn't mean that the default allocator
|
||||||
|
# is PartitionAlloc, which is governed by |use_allocator|.
|
||||||
|
#
|
||||||
|
# This flag is currently set to false only on Cronet bots, because Cronet
|
||||||
|
# doesn't use PartitionAlloc at all, and doesn't wish to incur the library
|
||||||
|
# size increase (crbug.com/674570).
|
||||||
|
use_partition_alloc = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!use_partition_alloc && use_allocator == "partition") {
|
||||||
|
# If there is a conflict, prioritize |use_partition_alloc| over
|
||||||
|
# |use_allocator|.
|
||||||
|
use_allocator = "none"
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(use_allocator == "none" || use_allocator == "partition")
|
||||||
|
|
||||||
|
assert(
|
||||||
|
!use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
|
||||||
|
is_fuchsia || is_apple,
|
||||||
|
"use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
|
||||||
|
"and Windows.")
|
||||||
|
|
||||||
|
if (is_win && use_allocator_shim) {
|
||||||
|
# TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
|
||||||
|
assert(!is_component_build,
|
||||||
|
"The allocator shim doesn't work for the component build on Windows.")
|
||||||
|
}
|
||||||
|
|
||||||
|
_is_brp_supported = (is_win || is_android || is_linux || is_mac ||
|
||||||
|
is_chromeos) && use_allocator == "partition"
|
||||||
|
|
||||||
|
_is_mcp_supported = is_win && use_allocator == "partition"
|
||||||
|
|
||||||
|
declare_args() {
|
||||||
|
# We jam MTECheckedPtr off by default, but can set it to
|
||||||
|
# `_is_mcp_supported` to activate it.
|
||||||
|
use_mte_checked_ptr = false
|
||||||
|
}
|
||||||
|
|
||||||
|
declare_args() {
|
||||||
|
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
|
||||||
|
# of raw_ptr<T>, and enable PartitionAlloc support for it.
|
||||||
|
# We also disable BRP in the presence of MTECheckedPtr, which is almost
|
||||||
|
# never enabled.
|
||||||
|
use_backup_ref_ptr = _is_brp_supported && !use_mte_checked_ptr
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
|
||||||
|
"MTECheckedPtr conflicts with BRP.")
|
||||||
|
|
||||||
|
declare_args() {
|
||||||
|
# If BRP is enabled, additional options are available:
|
||||||
|
# - put_ref_count_in_previous_slot: place the ref-count at the end of the
|
||||||
|
# previous slot (or in metadata if a slot starts on the page boundary), as
|
||||||
|
# opposed to the beginning of the slot.
|
||||||
|
# - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
|
||||||
|
# are too expensive to have on by default.
|
||||||
|
# - enable_dangling_raw_ptr_checks: enable checking raw_ptr do not become
|
||||||
|
# dangling during their lifetime.
|
||||||
|
put_ref_count_in_previous_slot = use_backup_ref_ptr
|
||||||
|
enable_backup_ref_ptr_slow_checks = false
|
||||||
|
enable_dangling_raw_ptr_checks = false
|
||||||
|
|
||||||
|
# The supported platforms are supposed to match `_is_brp_supported`, but we
|
||||||
|
# enable the feature on Linux early because it's most widely used for security
|
||||||
|
# research
|
||||||
|
use_asan_backup_ref_ptr = is_asan && (is_win || is_android || is_linux)
|
||||||
|
}
|
||||||
|
|
||||||
|
# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
|
||||||
|
# In theory, such a configuration is possible, but its scope would be limited to
|
||||||
|
# only Blink partitions, which is currently not tested. Better to trigger an
|
||||||
|
# error, than have BackupRefPtr silently disabled while believing it is enabled.
|
||||||
|
if (!is_nacl) {
|
||||||
|
assert(!use_backup_ref_ptr || use_allocator == "partition",
|
||||||
|
"Can't use BackupRefPtr without PartitionAlloc-Everywhere")
|
||||||
|
}
|
||||||
|
|
||||||
|
# put_ref_count_in_previous_slot can only be used if use_backup_ref_ptr
|
||||||
|
# is true.
|
||||||
|
assert(
|
||||||
|
use_backup_ref_ptr || !put_ref_count_in_previous_slot,
|
||||||
|
"Can't put ref count in the previous slot if BackupRefPtr isn't enabled at all")
|
||||||
|
|
||||||
|
# enable_backup_ref_ptr_slow_checks can only be used if use_backup_ref_ptr
|
||||||
|
# is true.
|
||||||
|
assert(use_backup_ref_ptr || !enable_backup_ref_ptr_slow_checks,
|
||||||
|
"Can't enable additional BackupRefPtr checks if it isn't enabled at all")
|
||||||
|
|
||||||
|
# enable_dangling_raw_ptr_checks can only be used if use_backup_ref_ptr
|
||||||
|
# is true.
|
||||||
|
assert(
|
||||||
|
use_backup_ref_ptr || !enable_dangling_raw_ptr_checks,
|
||||||
|
"Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")
|
||||||
|
|
||||||
|
# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
|
||||||
|
assert(
|
||||||
|
!use_backup_ref_ptr || !use_asan_backup_ref_ptr,
|
||||||
|
"Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")
|
||||||
|
|
||||||
|
assert(!use_asan_backup_ref_ptr || is_asan,
|
||||||
|
"AsanBackupRefPtr requires AddressSanitizer")
|
40
src/base/allocator/allocator_check.cc
Normal file
40
src/base/allocator/allocator_check.cc
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_check.h"
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include "base/allocator/winheap_stubs_win.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
|
||||||
|
#include <malloc.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#include "base/allocator/allocator_interception_mac.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
bool IsAllocatorInitialized() {
|
||||||
|
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
// Set by allocator_shim_override_ucrt_symbols_win.h when the
|
||||||
|
// shimmed _set_new_mode() is called.
|
||||||
|
return g_is_win_shim_layer_initialized;
|
||||||
|
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
|
||||||
|
!BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// From allocator_interception_mac.mm.
|
||||||
|
return base::allocator::g_replaced_default_zone;
|
||||||
|
#else
|
||||||
|
return true;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
18
src/base/allocator/allocator_check.h
Normal file
18
src/base/allocator/allocator_check.h
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
BASE_EXPORT bool IsAllocatorInitialized();
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
|
15
src/base/allocator/allocator_extension.cc
Normal file
15
src/base/allocator/allocator_extension.cc
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_extension.h"
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/check.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
void ReleaseFreeMemory() {}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
23
src/base/allocator/allocator_extension.h
Normal file
23
src/base/allocator/allocator_extension.h
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
|
||||||
|
|
||||||
|
#include <stddef.h> // for size_t
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
// Request that the allocator release any free memory it knows about to the
|
||||||
|
// system.
|
||||||
|
BASE_EXPORT void ReleaseFreeMemory();
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
|
65
src/base/allocator/allocator_interception_mac.h
Normal file
65
src/base/allocator/allocator_interception_mac.h
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "third_party/apple_apsl/malloc.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
struct MallocZoneFunctions;
|
||||||
|
|
||||||
|
// This initializes AllocatorDispatch::default_dispatch by saving pointers to
|
||||||
|
// the functions in the current default malloc zone. This must be called before
|
||||||
|
// the default malloc zone is changed to have its intended effect.
|
||||||
|
void InitializeDefaultDispatchToMacAllocator();
|
||||||
|
|
||||||
|
// Saves the function pointers currently used by the default zone.
|
||||||
|
void StoreFunctionsForDefaultZone();
|
||||||
|
|
||||||
|
// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
|
||||||
|
void StoreFunctionsForAllZones();
|
||||||
|
|
||||||
|
// For all malloc zones that have been stored, replace their functions with
|
||||||
|
// |functions|.
|
||||||
|
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);
|
||||||
|
|
||||||
|
extern bool g_replaced_default_zone;
|
||||||
|
|
||||||
|
// Calls the original implementation of malloc/calloc prior to interception.
|
||||||
|
bool UncheckedMallocMac(size_t size, void** result);
|
||||||
|
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
|
||||||
|
|
||||||
|
// Intercepts calls to default and purgeable malloc zones. Intercepts Core
|
||||||
|
// Foundation and Objective-C allocations.
|
||||||
|
// Has no effect on the default malloc zone if the allocator shim already
|
||||||
|
// performs that interception.
|
||||||
|
BASE_EXPORT void InterceptAllocationsMac();
|
||||||
|
|
||||||
|
// Updates all malloc zones to use their original functions.
|
||||||
|
// Also calls ClearAllMallocZonesForTesting.
|
||||||
|
BASE_EXPORT void UninterceptMallocZonesForTesting();
|
||||||
|
|
||||||
|
// Returns true if allocations are successfully being intercepted for all malloc
|
||||||
|
// zones.
|
||||||
|
bool AreMallocZonesIntercepted();
|
||||||
|
|
||||||
|
// Periodically checks for, and shims new malloc zones. Stops checking after 1
|
||||||
|
// minute.
|
||||||
|
BASE_EXPORT void PeriodicallyShimNewMallocZones();
|
||||||
|
|
||||||
|
// Exposed for testing.
|
||||||
|
BASE_EXPORT void ShimNewMallocZones();
|
||||||
|
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
|
||||||
|
const MallocZoneFunctions* functions);
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
|
611
src/base/allocator/allocator_interception_mac.mm
Normal file
611
src/base/allocator/allocator_interception_mac.mm
Normal file
@ -0,0 +1,611 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file contains all the logic necessary to intercept allocations on
|
||||||
|
// macOS. "malloc zones" are an abstraction that allows the process to intercept
|
||||||
|
// all malloc-related functions. There is no good mechanism [short of
|
||||||
|
// interposition] to determine new malloc zones are added, so there's no clean
|
||||||
|
// mechanism to intercept all malloc zones. This file contains logic to
|
||||||
|
// intercept the default and purgeable zones, which always exist. A cursory
|
||||||
|
// review of Chrome seems to imply that non-default zones are almost never used.
|
||||||
|
//
|
||||||
|
// This file also contains logic to intercept Core Foundation and Objective-C
|
||||||
|
// allocations. The implementations forward to the default malloc zone, so the
|
||||||
|
// only reason to intercept these calls is to re-label OOM crashes with slightly
|
||||||
|
// more details.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_interception_mac.h"
|
||||||
|
|
||||||
|
#include <CoreFoundation/CoreFoundation.h>
|
||||||
|
#import <Foundation/Foundation.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <mach/mach.h>
|
||||||
|
#import <objc/runtime.h>
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
|
#include <new>
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/malloc_zone_functions_mac.h"
|
||||||
|
#include "base/bind.h"
|
||||||
|
#include "base/bits.h"
|
||||||
|
#include "base/logging.h"
|
||||||
|
#include "base/mac/mach_logging.h"
|
||||||
|
#include "base/process/memory.h"
|
||||||
|
#include "base/threading/sequenced_task_runner_handle.h"
|
||||||
|
#include "base/time/time.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
#include "third_party/apple_apsl/CFBase.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_IOS)
|
||||||
|
#include "base/ios/ios_util.h"
|
||||||
|
#else
|
||||||
|
#include "base/mac/mac_util.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base::allocator {
|
||||||
|
|
||||||
|
bool g_replaced_default_zone = false;
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
bool g_oom_killer_enabled;
|
||||||
|
bool g_allocator_shims_failed_to_install;
|
||||||
|
|
||||||
|
// Starting with Mac OS X 10.7, the zone allocators set up by the system are
|
||||||
|
// read-only, to prevent them from being overwritten in an attack. However,
|
||||||
|
// blindly unprotecting and reprotecting the zone allocators fails with
|
||||||
|
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
|
||||||
|
// memory in its bss. Explicit saving/restoring of the protection is required.
|
||||||
|
//
|
||||||
|
// This function takes a pointer to a malloc zone, de-protects it if necessary,
|
||||||
|
// and returns (in the out parameters) a region of memory (if any) to be
|
||||||
|
// re-protected when modifications are complete. This approach assumes that
|
||||||
|
// there is no contention for the protection of this memory.
|
||||||
|
//
|
||||||
|
// Returns true if the malloc zone was properly de-protected, or false
|
||||||
|
// otherwise. If this function returns false, the out parameters are invalid and
|
||||||
|
// the region does not need to be re-protected.
|
||||||
|
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
                         vm_address_t* reprotection_start,
                         vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  // vm_region_64() rounds *reprotection_start down to the start of the
  // enclosing VM region and fills in that region's size and protections.
  kern_return_t result =
      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
                   VM_REGION_BASIC_INFO_64,
                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
  if (result != KERN_SUCCESS) {
    MACH_LOG(ERROR, result) << "vm_region_64";
    return false;
  }

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See
  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
  mach_port_deallocate(mach_task_self(), unused);

  // If the region can never be made writable, a vm_protect() below would be
  // guaranteed to fail, so bail out early.
  if (!(info.max_protection & VM_PROT_WRITE)) {
    LOG(ERROR) << "Invalid max_protection " << info.max_protection;
    return false;
  }

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 10 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 10.
  DCHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
                          reinterpret_cast<vm_address_t>(*reprotection_start);
  DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable. Signal "nothing to
    // re-protect" to the caller with a zero-length region.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    // Save the current protection for the caller to restore later, then add
    // write permission to the whole region.
    *reprotection_value = info.protection;
    result =
        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
                   false, info.protection | VM_PROT_WRITE);
    if (result != KERN_SUCCESS) {
      MACH_LOG(ERROR, result) << "vm_protect";
      return false;
    }
  }
  return true;
}
|
||||||
|
|
||||||
|
#if !defined(ADDRESS_SANITIZER)
|
||||||
|
|
||||||
|
MallocZoneFunctions g_old_zone;
|
||||||
|
MallocZoneFunctions g_old_purgeable_zone;
|
||||||
|
|
||||||
|
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
|
||||||
|
void* result = g_old_zone.malloc(zone, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_calloc(struct _malloc_zone_t* zone,
|
||||||
|
size_t num_items,
|
||||||
|
size_t size) {
|
||||||
|
void* result = g_old_zone.calloc(zone, num_items, size);
|
||||||
|
if (!result && num_items && size)
|
||||||
|
TerminateBecauseOutOfMemory(num_items * size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
|
||||||
|
void* result = g_old_zone.valloc(zone, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// free() interceptor: simply forwards to the saved default-zone free;
// freeing cannot fail with OOM, so nothing else to do.
void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}
|
||||||
|
|
||||||
|
void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
|
||||||
|
void* result = g_old_zone.realloc(zone, ptr, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_memalign(struct _malloc_zone_t* zone,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size) {
|
||||||
|
void* result = g_old_zone.memalign(zone, alignment, size);
|
||||||
|
// Only die if posix_memalign would have returned ENOMEM, since there are
|
||||||
|
// other reasons why null might be returned. See posix_memalign() in 10.15's
|
||||||
|
// https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
|
||||||
|
if (!result && size && alignment >= sizeof(void*) &&
|
||||||
|
base::bits::IsPowerOfTwo(alignment)) {
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
|
||||||
|
void* result = g_old_purgeable_zone.malloc(zone, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
|
||||||
|
size_t num_items,
|
||||||
|
size_t size) {
|
||||||
|
void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
|
||||||
|
if (!result && num_items && size)
|
||||||
|
TerminateBecauseOutOfMemory(num_items * size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
|
||||||
|
void* result = g_old_purgeable_zone.valloc(zone, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// free() interceptor for the purgeable zone; freeing cannot OOM, so this is a
// plain pass-through.
void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}
|
||||||
|
|
||||||
|
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
|
||||||
|
void* ptr,
|
||||||
|
size_t size) {
|
||||||
|
void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
|
||||||
|
if (!result && size)
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size) {
|
||||||
|
void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
|
||||||
|
// Only die if posix_memalign would have returned ENOMEM, since there are
|
||||||
|
// other reasons why null might be returned. See posix_memalign() in 10.15's
|
||||||
|
// https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
|
||||||
|
if (!result && size && alignment >= sizeof(void*) &&
|
||||||
|
base::bits::IsPowerOfTwo(alignment)) {
|
||||||
|
TerminateBecauseOutOfMemory(size);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !defined(ADDRESS_SANITIZER)
|
||||||
|
|
||||||
|
#if !defined(ADDRESS_SANITIZER)
|
||||||
|
|
||||||
|
// === Core Foundation CFAllocators ===
|
||||||
|
|
||||||
|
// Returns true if the internal layout of the system CFAllocators is known for
// the running OS version, i.e. it is safe to poke at their contexts. The
// version caps below encode the last OS releases whose layout was verified.
bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
  return !base::ios::IsRunningOnOrLater(17, 0, 0);
#else
  return !base::mac::IsOSLaterThan13_DontCallThis();
#endif
}
|
||||||
|
|
||||||
|
// Returns a mutable pointer to the context embedded inside a system
// CFAllocator, by reinterpreting the opaque allocator as the known internal
// ChromeCFAllocatorLions layout and casting away constness.
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  const ChromeCFAllocatorLions* internal_allocator =
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator);
  return &const_cast<ChromeCFAllocatorLions*>(internal_allocator)->_context;
}
|
||||||
|
|
||||||
|
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
|
||||||
|
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
|
||||||
|
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
|
||||||
|
|
||||||
|
// kCFAllocatorSystemDefault interceptor: forwards to the saved callback and
// terminates on a null result.
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* ptr = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!ptr) {
    TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
  }
  return ptr;
}
|
||||||
|
|
||||||
|
// kCFAllocatorMalloc interceptor: forwards to the saved callback and
// terminates on a null result.
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* ptr = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!ptr) {
    TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
  }
  return ptr;
}
|
||||||
|
|
||||||
|
// kCFAllocatorMallocZone interceptor: forwards to the saved callback and
// terminates on a null result.
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* ptr = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!ptr) {
    TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
  }
  return ptr;
}
|
||||||
|
|
||||||
|
#endif // !defined(ADDRESS_SANITIZER)
|
||||||
|
|
||||||
|
// === Cocoa NSObject allocation ===
|
||||||
|
|
||||||
|
typedef id (*allocWithZone_t)(id, SEL, NSZone*);
|
||||||
|
allocWithZone_t g_old_allocWithZone;
|
||||||
|
|
||||||
|
// +[NSObject allocWithZone:] interceptor: forwards to the original IMP and
// terminates if allocation of the object failed. The requested size is not
// known here, hence the 0 passed to TerminateBecauseOutOfMemory().
id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id object = g_old_allocWithZone(self, _cmd, zone);
  if (!object) {
    TerminateBecauseOutOfMemory(0);
  }
  return object;
}
|
||||||
|
|
||||||
|
void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
|
||||||
|
ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
|
||||||
|
if (!IsMallocZoneAlreadyStored(chrome_zone))
|
||||||
|
return;
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(zone);
|
||||||
|
ReplaceZoneFunctions(chrome_zone, &functions);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// malloc() that is guaranteed not to trip the OOM-killer shims: it calls the
// saved pre-interception implementation when one exists. Stores the allocation
// (possibly null) in |*result| and returns whether it succeeded.
bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  // Under ASan the zones are not intercepted, so plain malloc() is already
  // unchecked.
  *result = malloc(size);
#else
  if (g_old_zone.malloc) {
    *result = g_old_zone.malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  // nullptr rather than NULL, consistent with the rest of this file.
  return *result != nullptr;
}
|
||||||
|
|
||||||
|
// calloc() that is guaranteed not to trip the OOM-killer shims: it calls the
// saved pre-interception implementation when one exists. Stores the allocation
// (possibly null) in |*result| and returns whether it succeeded.
bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  // Under ASan the zones are not intercepted, so plain calloc() is already
  // unchecked.
  *result = calloc(num_items, size);
#else
  if (g_old_zone.calloc) {
    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  // nullptr rather than NULL, consistent with the rest of this file.
  return *result != nullptr;
}
|
||||||
|
|
||||||
|
// Saves the current function tables of all malloc zones so that later
// interception can forward to (and be undone back to) the originals.
void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}
|
||||||
|
|
||||||
|
// Records the default malloc zone's current function table.
void StoreFunctionsForDefaultZone() {
  StoreMallocZone(reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()));
}
|
||||||
|
|
||||||
|
void StoreFunctionsForAllZones() {
|
||||||
|
// This ensures that the default zone is always at the front of the array,
|
||||||
|
// which is important for performance.
|
||||||
|
StoreFunctionsForDefaultZone();
|
||||||
|
|
||||||
|
vm_address_t* zones;
|
||||||
|
unsigned int count;
|
||||||
|
kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
|
||||||
|
if (kr != KERN_SUCCESS)
|
||||||
|
return;
|
||||||
|
for (unsigned int i = 0; i < count; ++i) {
|
||||||
|
ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
|
||||||
|
StoreMallocZone(zone);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Installs |functions| into every zone whose original functions were stored
// earlier and that has not already been given this table.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  // Record that the default zone has been swapped out, so later code knows
  // interception is active.
  g_replaced_default_zone = true;
}
|
||||||
|
|
||||||
|
// Installs the OOM-killer interceptors: malloc-zone functions, CFAllocator
// callbacks, and +[NSObject allocWithZone:]. Idempotent — repeated calls are
// no-ops. (The only change from the original is consistent use of
// #if !defined(ADDRESS_SANITIZER) and commented #endif directives.)
void InterceptAllocationsMac() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see malloc_zone_malloc() in
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
  // for details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // The malloc zone backed by PartitionAlloc crashes by default, so there is
  // no need to install the OOM killer.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;

    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
|
||||||
|
|
||||||
|
// Restores the original function tables of the default zone and of every zone
// registered with the system, then clears the stored tables. Test-only.
void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());
  vm_address_t* zones;
  unsigned int count;
  // Pass nullptr (not 0) for the unused memory-reader argument, consistent
  // with ReplaceFunctionsForStoredZones().
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }

  ClearAllMallocZonesForTesting();
}
|
||||||
|
|
||||||
|
// Returns true unless a previous ReplaceZoneFunctions() failed to deprotect a
// zone, i.e. unless shim installation is known to have failed.
bool AreMallocZonesIntercepted() {
  return !g_allocator_shims_failed_to_install;
}
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Shims any zones created since the last call, then reschedules itself with
// exponential backoff until |end_time| passes. Note the asymmetry below: the
// task is posted after the *current* |delay|, while the bound argument
// |next_delay| doubles the wait used by the following iteration.
void ShimNewMallocZonesAndReschedule(base::Time end_time,
                                     base::TimeDelta delay) {
  ShimNewMallocZones();

  if (base::Time::Now() > end_time)
    return;

  base::TimeDelta next_delay = delay * 2;
  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
      delay);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void PeriodicallyShimNewMallocZones() {
|
||||||
|
base::Time end_time = base::Time::Now() + base::Minutes(1);
|
||||||
|
base::TimeDelta initial_delay = base::Seconds(1);
|
||||||
|
ShimNewMallocZonesAndReschedule(end_time, initial_delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Saves the function tables of any newly created zones, then installs the
// default zone's (already shimmed) functions into every stored zone that
// still needs them.
void ShimNewMallocZones() {
  StoreFunctionsForAllZones();

  // Use the functions for the default zone as a template to replace those
  // new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  DCHECK(IsMallocZoneAlreadyStored(default_zone));

  // Filled in by StoreZoneFunctions() below; no prior initialization needed.
  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}
|
||||||
|
|
||||||
|
// Overwrites |zone|'s function pointers with |functions|, temporarily making
// the zone's memory writable if the OS protected it. Optional entries are only
// written when present, and version-gated fields only when the zone's layout
// actually contains them.
void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  bool success = DeprotectMallocZone(zone, &reprotection_start,
                                     &reprotection_length, &reprotection_value);
  if (!success) {
    // Leave the zone untouched and record the failure so
    // AreMallocZonesIntercepted() reports it.
    g_allocator_shims_failed_to_install = true;
    return;
  }

  // The five core entry points are mandatory; the rest are optional.
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  if (functions->batch_malloc)
    zone->batch_malloc = functions->batch_malloc;
  if (functions->batch_free)
    zone->batch_free = functions->batch_free;
  if (functions->size)
    zone->size = functions->size;
  // memalign first appeared in zone version 5; writing it into an older zone
  // would scribble past the end of the struct.
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  // free_definite_size first appeared in zone version 6.
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }

  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}
|
||||||
|
|
||||||
|
} // namespace base::allocator
|
418
src/base/allocator/allocator_shim.cc
Normal file
418
src/base/allocator/allocator_shim.cc
Normal file
@ -0,0 +1,418 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
|
||||||
|
#include <errno.h>
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <new>
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/bits.h"
|
||||||
|
#include "base/check_op.h"
|
||||||
|
#include "base/memory/page_size.h"
|
||||||
|
#include "base/threading/platform_thread.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if !BUILDFLAG(IS_WIN)
|
||||||
|
#include <unistd.h>
|
||||||
|
#else
|
||||||
|
#include "base/allocator/winheap_stubs_win.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#include <malloc/malloc.h>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_interception_mac.h"
|
||||||
|
#include "base/mac/mach_logging.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// No calls to malloc / new in this file. They would would cause re-entrancy of
|
||||||
|
// the shim, which is hard to deal with. Keep this code as simple as possible
|
||||||
|
// and don't use any external C++ object here, not even //base ones. Even if
|
||||||
|
// they are safe to use today, in future they might be refactored.
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
|
||||||
|
&base::allocator::AllocatorDispatch::default_dispatch};
|
||||||
|
|
||||||
|
bool g_call_new_handler_on_malloc_failure = false;
|
||||||
|
|
||||||
|
// Returns the system page size, computing it once and caching the result in a
// function-local static thereafter.
ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t page_size = 0;
  if (page_size == 0) {
    page_size = base::GetPageSize();
  }
  return page_size;
}
|
||||||
|
|
||||||
|
// Calls the std::new handler thread-safely. Returns true if a new_handler was
|
||||||
|
// set and called, false if no new_handler was set.
|
||||||
|
// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}
|
||||||
|
|
||||||
|
// Returns the current head of the dispatch chain. A relaxed load suffices:
// InsertAllocatorDispatch() publishes new nodes with a seq_cst fence, and
// this is the malloc fast path (see the comment there).
ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
// Controls whether a failed malloc()-family call invokes the std::new_handler
// before returning null. Mirrored into PartitionAlloc when it backs malloc.
void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(value);
#endif
}
|
||||||
|
|
||||||
|
// Allocates |size| bytes through the shim chain without OOM handling; returns
// null on failure instead of invoking the new_handler or terminating.
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}
|
||||||
|
|
||||||
|
// Frees a pointer obtained from UncheckedAlloc() through the shim chain.
void UncheckedFree(void* ptr) {
  return chain_head->free_function(chain_head, ptr, nullptr);
}
|
||||||
|
|
||||||
|
// Prepends |dispatch| to the shim chain. Thread-safe w.r.t. concurrent
// insertions; CHECK-fails if an implausible number of races is lost.
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  // constexpr: this retry bound is a compile-time constant, not mutable state.
  constexpr size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}
|
||||||
|
|
||||||
|
// Pops |dispatch| off the chain head. Test-only: only the most recently
// inserted dispatch may be removed, and no concurrent mallocs may be racing.
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
// The Shim* functions below are the entry-points into the shim-layer and
|
||||||
|
// are supposed to be invoked by the allocator_shim_override_*
|
||||||
|
// headers to route the malloc / new symbols through the shim layer.
|
||||||
|
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
|
||||||
|
// between the system-defined entry points and the shim implementations.
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
// The general pattern for allocations is:
|
||||||
|
// - Try to allocate, if succeded return the pointer.
|
||||||
|
// - If the allocation failed:
|
||||||
|
// - Call the std::new_handler if it was a C++ allocation.
|
||||||
|
// - Call the std::new_handler if it was a malloc() (or calloc() or similar)
|
||||||
|
// AND SetCallNewHandlerOnMallocFailure(true).
|
||||||
|
// - If the std::new_handler is NOT set just return nullptr.
|
||||||
|
// - If the std::new_handler is set:
|
||||||
|
// - Assume it will abort() if it fails (very likely the new_handler will
|
||||||
|
// just suicide printing a message).
|
||||||
|
// - Assume it did succeed if it returns, in which case reattempt the alloc.
|
||||||
|
|
||||||
|
// Entry point for throwing operator new: forwards to the dispatch chain and,
// on failure, retries for as long as the installed std::new_handler makes
// progress (CallNewHandler() returning true).
ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* zone = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    zone = malloc_default_zone();
#endif
    void* result = dispatch->alloc_function(dispatch, size, zone);
    if (result || !CallNewHandler(size))
      return result;
  }
}
|
||||||
|
|
||||||
|
// Entry point for operator new(std::nothrow): a single unchecked attempt that
// never invokes the std::new_handler; reports failure by returning nullptr.
ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  void* zone = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  zone = malloc_default_zone();
#endif
  return dispatch->alloc_unchecked_function(dispatch, size, zone);
}
|
||||||
|
|
||||||
|
// Entry point for aligned operator new: same retry-via-new_handler loop as
// ShimCppNew(), routed through the aligned allocation slot of the chain.
ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* zone = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    zone = malloc_default_zone();
#endif
    void* result =
        dispatch->alloc_aligned_function(dispatch, alignment, size, zone);
    if (result || !CallNewHandler(size))
      return result;
  }
}
|
||||||
|
|
||||||
|
// Entry point for operator delete: forwards straight to the chain's free slot.
ALWAYS_INLINE void ShimCppDelete(void* address) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  void* zone = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  zone = malloc_default_zone();
#endif
  return dispatch->free_function(dispatch, address, zone);
}
|
||||||
|
|
||||||
|
// malloc() entry point. Unlike operator new, the std::new_handler retry loop
// is opt-in via SetCallNewHandlerOnMallocFailure().
ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result = dispatch->alloc_function(dispatch, size, context);
    if (result)
      return result;
    if (!g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// calloc() entry point: zero-initialized allocation of |n| * |size| bytes,
// with the same optional new_handler retry policy as ShimMalloc().
ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result =
        dispatch->alloc_zero_initialized_function(dispatch, n, size, context);
    if (result)
      return result;
    if (!g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// realloc() entry point. realloc(ptr, 0) acts as free() and may legitimately
// return nullptr, so the new_handler retry is skipped when |size| is zero.
ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result = dispatch->realloc_function(dispatch, address, size, context);
    if (result)
      return result;
    if (!size || !g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// memalign() entry point; also the backend for ShimPosixMemalign(),
// ShimValloc() and ShimPvalloc().
ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result =
        dispatch->alloc_aligned_function(dispatch, alignment, size, context);
    if (result)
      return result;
    if (!g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// posix_memalign() entry point. Returns 0 on success, EINVAL for a bad
// alignment, ENOMEM on allocation failure; stores the result through |res|.
ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign() is required to validate its arguments itself: the
  // alignment must be a power of two and a multiple of sizeof(void*). See
  // tc_posix_memalign() in tc_malloc.cc for the same treatment.
  const bool alignment_ok = (alignment % sizeof(void*)) == 0 &&
                            base::bits::IsPowerOfTwo(alignment);
  if (!alignment_ok)
    return EINVAL;
  void* allocation = ShimMemalign(alignment, size, nullptr);
  *res = allocation;
  return allocation ? 0 : ENOMEM;
}
|
||||||
|
|
||||||
|
// valloc() entry point: a page-aligned allocation, implemented as
// memalign(pagesize, size).
ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}
|
||||||
|
|
||||||
|
// pvalloc() entry point: like valloc(), but the size is rounded up to a whole
// number of pages.
ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  const size_t rounded = size ? base::bits::AlignUp(size, GetCachedPageSize())
                              : GetCachedPageSize();
  // The context is nullptr because pvalloc() is glibc-only and does not exist
  // on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), rounded, nullptr);
}
|
||||||
|
|
||||||
|
// free() entry point: forwards straight to the chain's free slot.
ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->free_function(dispatch, address, context);
}
|
||||||
|
|
||||||
|
// malloc_usable_size() / malloc_size() entry point: reports the usable size
// of the allocation at |address| as seen by the chain head.
ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->get_size_estimate_function(
      dispatch, const_cast<void*>(address), context);
}
|
||||||
|
|
||||||
|
// Batch allocation entry point (macOS/iOS zone API): fills |results| with up
// to |num_requested| allocations of |size| bytes, returning the count made.
ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->batch_malloc_function(dispatch, size, results,
                                         num_requested, context);
}
|
||||||
|
|
||||||
|
// Batch free entry point (macOS/iOS zone API): frees the first
// |num_to_be_freed| pointers in |to_be_freed|.
ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->batch_free_function(dispatch, to_be_freed, num_to_be_freed,
                                       context);
}
|
||||||
|
|
||||||
|
// free_definite_size entry point (macOS/iOS zone API): free() variant where
// the caller already knows the allocation's size.
ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->free_definite_size_function(dispatch, ptr, size, context);
}
|
||||||
|
|
||||||
|
// _aligned_malloc() entry point (Windows CRT), with the same optional
// new_handler retry policy as ShimMalloc().
ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result =
        dispatch->aligned_malloc_function(dispatch, size, alignment, context);
    if (result)
      return result;
    if (!g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// _aligned_realloc() entry point (Windows CRT). _aligned_realloc(ptr, 0, ...)
// acts as _aligned_free() and may legitimately return nullptr, so the
// new_handler retry is skipped when |size| is zero.
ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result = dispatch->aligned_realloc_function(dispatch, address, size,
                                                      alignment, context);
    if (result)
      return result;
    if (!size || !g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return nullptr;
  }
}
|
||||||
|
|
||||||
|
// _aligned_free() entry point (Windows CRT).
ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  return dispatch->aligned_free_function(dispatch, address, context);
}
|
||||||
|
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
#if !BUILDFLAG(IS_WIN) && \
|
||||||
|
!(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
|
||||||
|
// Cpp symbols (new / delete) should always be routed through the shim layer
|
||||||
|
// except on Windows and macOS (except for PartitionAlloc-Everywhere) where the
|
||||||
|
// malloc intercept is deep enough that it also catches the cpp calls.
|
||||||
|
//
|
||||||
|
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
|
||||||
|
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid crashes
|
||||||
|
// in case of operator new() noexcept. Thus, operator new() noexcept needs to
|
||||||
|
// be routed to base::internal::PartitionMallocUnchecked through the shim layer.
|
||||||
|
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_ANDROID)
|
||||||
|
// Android does not support symbol interposition. The way malloc symbols are
|
||||||
|
// intercepted on Android is by using link-time -wrap flags.
|
||||||
|
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
|
||||||
|
#elif BUILDFLAG(IS_WIN)
|
||||||
|
// On Windows we use plain link-time overriding of the CRT symbols.
|
||||||
|
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
|
||||||
|
#elif BUILDFLAG(IS_APPLE)
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
|
||||||
|
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#include "base/allocator/allocator_shim_override_mac_symbols.h"
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#else
|
||||||
|
#include "base/allocator/allocator_shim_override_libc_symbols.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
|
||||||
|
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
|
||||||
|
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
|
||||||
|
// as we are then asked to free memory we didn't allocate.
|
||||||
|
//
|
||||||
|
// This only happened in glibc to allocate TLS storage metadata, and there are
|
||||||
|
// no other callers of __libc_memalign() there as of September 2020. To work
|
||||||
|
// around this issue, intercept this internal libc symbol to make sure that both
|
||||||
|
// the allocation and the free() are caught by the shim.
|
||||||
|
//
|
||||||
|
// This seems fragile, and is, but there is ample precedent for it, making it
|
||||||
|
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
|
||||||
|
// this mechanism.
|
||||||
|
|
||||||
|
#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
// Runtime hook-up of the shim on macOS: swaps the default malloc zone's
// function table so that malloc & friends enter ShimMalloc() and friends.
// No-op under PartitionAlloc-Everywhere, where the default zone is instead
// overridden via allocator_shim_override_mac_default_zone.h.
void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Cross-checks.
|
||||||
|
|
||||||
|
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
|
||||||
|
#error The allocator shim should not be compiled when building for memory tools.
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
|
||||||
|
(defined(_MSC_VER) && defined(_CPPUNWIND))
|
||||||
|
#error This code cannot be used when exceptions are turned on.
|
||||||
|
#endif
|
202
src/base/allocator/allocator_shim.h
Normal file
202
src/base/allocator/allocator_shim.h
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "base/types/strong_alias.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_ALLOW_PCSCAN)
|
||||||
|
#include "base/allocator/partition_allocator/starscan/pcscan.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
// Allocator Shim API. Allows to:
|
||||||
|
// - Configure the behavior of the allocator (what to do on OOM failures).
|
||||||
|
// - Install new hooks (AllocatorDispatch) in the allocator chain.
|
||||||
|
|
||||||
|
// When this shim layer is enabled, the route of an allocation is as-follows:
|
||||||
|
//
|
||||||
|
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
|
||||||
|
// The override_* headers define the symbols required to intercept calls to
|
||||||
|
// malloc() and operator new (if not overridden by specific C++ classes).
|
||||||
|
//
|
||||||
|
// [allocator_shim.cc] Routing allocation calls to the shim:
|
||||||
|
// The headers above route the calls to the internal ShimMalloc(), ShimFree(),
|
||||||
|
// ShimCppNew() etc. methods defined in allocator_shim.cc.
|
||||||
|
// These methods will: (1) forward the allocation call to the front of the
|
||||||
|
// AllocatorDispatch chain. (2) perform security hardenings (e.g., might
|
||||||
|
// call std::new_handler on OOM failure).
|
||||||
|
//
|
||||||
|
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
|
||||||
|
// It is a singly linked list where each element is a struct with function
|
||||||
|
// pointers (|malloc_function|, |free_function|, etc). Normally the chain
|
||||||
|
// consists of a single AllocatorDispatch element, herein called
|
||||||
|
// the "default dispatch", which is statically defined at build time and
|
||||||
|
// ultimately routes the calls to the actual allocator defined by the build
|
||||||
|
// config (glibc, ...).
|
||||||
|
//
|
||||||
|
// It is possible to dynamically insert further AllocatorDispatch stages
|
||||||
|
// to the front of the chain, for debugging / profiling purposes.
|
||||||
|
//
|
||||||
|
// All the functions must be thread safe. The shim does not enforce any
|
||||||
|
// serialization. This is to route to thread-aware allocators without
|
||||||
|
// introducing unnecessary perf hits.
|
||||||
|
|
||||||
|
// One element of the allocation-hook chain. Each slot holds a function
// pointer for one intercepted (de)allocation primitive; |next| links to the
// downstream element, ending at the statically defined |default_dispatch|.
// Every function receives |self| (the current element, so implementations can
// forward to self->next) and an opaque |context| (the malloc zone pointer on
// Apple platforms, nullptr elsewhere).
struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self,
                        size_t size,
                        void* context);
  // Like AllocFn, but must return nullptr on failure instead of engaging any
  // OOM handling (used by UncheckedAlloc() and nothrow operator new).
  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
                                 size_t size,
                                 void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                       size_t n,
                                       size_t size,
                                       void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self,
                               size_t alignment,
                               size_t size,
                               void* context);
  using ReallocFn = void*(const AllocatorDispatch* self,
                          void* address,
                          size_t size,
                          void* context);
  using FreeFn = void(const AllocatorDispatch* self,
                      void* address,
                      void* context);
  // Returns the allocated size of user data (not including heap overhead).
  // Can be larger than the requested size.
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address,
                                   void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
                                 size_t size,
                                 void** results,
                                 unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self,
                           void** to_be_freed,
                           unsigned num_to_be_freed,
                           void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
                                  void* ptr,
                                  size_t size,
                                  void* context);
  using AlignedMallocFn = void*(const AllocatorDispatch* self,
                                size_t size,
                                size_t alignment,
                                void* context);
  using AlignedReallocFn = void*(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 size_t alignment,
                                 void* context);
  using AlignedFreeFn = void(const AllocatorDispatch* self,
                             void* address,
                             void* context);

  AllocFn* const alloc_function;
  AllocUncheckedFn* const alloc_unchecked_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  // batch_malloc, batch_free, and free_definite_size are specific to the OSX
  // and iOS allocators.
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;
  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
  // Windows allocator.
  AlignedMallocFn* const aligned_malloc_function;
  AlignedReallocFn* const aligned_realloc_function;
  AlignedFreeFn* const aligned_free_function;

  // The next element in the chain; nullptr in the terminal (default) element.
  const AllocatorDispatch* next;

  // |default_dispatch| is statically defined by one (and only one) of the
  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
  // configuration.
  static const AllocatorDispatch default_dispatch;
};
|
||||||
|
|
||||||
|
// When true makes malloc behave like new, w.r.t calling the new_handler if
|
||||||
|
// the allocation fails (see set_new_mode() in Windows).
|
||||||
|
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
|
||||||
|
|
||||||
|
// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
|
||||||
|
// regardless of SetCallNewHandlerOnMallocFailure().
|
||||||
|
BASE_EXPORT void* UncheckedAlloc(size_t size);
|
||||||
|
|
||||||
|
// Frees memory allocated with UncheckedAlloc().
|
||||||
|
BASE_EXPORT void UncheckedFree(void* ptr);
|
||||||
|
|
||||||
|
// Inserts |dispatch| in front of the allocator chain. This method is
|
||||||
|
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
|
||||||
|
// The callers have responsibility for inserting a single dispatch no more
|
||||||
|
// than once.
|
||||||
|
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
|
||||||
|
|
||||||
|
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
|
||||||
|
// removal of arbitrary elements from a singly linked list would require a lock
|
||||||
|
// in malloc(), which we really don't want.
|
||||||
|
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
|
||||||
|
// Configures the allocator for the caller's allocation domain. Allocations that
|
||||||
|
// take place prior to this configuration step will succeed, but will not
|
||||||
|
// benefit from its one-time mitigations. As such, this function must be called
|
||||||
|
// as early as possible during startup.
|
||||||
|
BASE_EXPORT void ConfigurePartitionAlloc();
|
||||||
|
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
void InitializeDefaultAllocatorPartitionRoot();
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// On macOS, the allocator shim needs to be turned on during runtime.
|
||||||
|
BASE_EXPORT void InitializeAllocatorShim();
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
|
||||||
|
|
||||||
|
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
|
||||||
|
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
|
||||||
|
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
|
||||||
|
using UseDedicatedAlignedPartition =
|
||||||
|
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
|
||||||
|
using AlternateBucketDistribution =
|
||||||
|
base::StrongAlias<class AlternateBucketDistributionTag, bool>;
|
||||||
|
|
||||||
|
// If |thread_cache_on_non_quarantinable_partition| is specified, the
|
||||||
|
// thread-cache will be enabled on the non-quarantinable partition. The
|
||||||
|
// thread-cache on the main (malloc) partition will be disabled.
|
||||||
|
BASE_EXPORT void ConfigurePartitions(
|
||||||
|
EnableBrp enable_brp,
|
||||||
|
EnableBrpZapping enable_brp_zapping,
|
||||||
|
SplitMainPartition split_main_partition,
|
||||||
|
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||||
|
AlternateBucketDistribution use_alternate_bucket_distribution);
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
|
||||||
|
#endif
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
|
122
src/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
Normal file
122
src/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "base/numerics/checked_math.h"
|
||||||
|
#include "base/process/memory.h"
|
||||||
|
|
||||||
|
#include <dlfcn.h>
|
||||||
|
#include <malloc.h>
|
||||||
|
|
||||||
|
// This translation unit defines a default dispatch for the allocator shim which
|
||||||
|
// routes allocations to libc functions.
|
||||||
|
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
void* __libc_malloc(size_t size);
|
||||||
|
void* __libc_calloc(size_t n, size_t size);
|
||||||
|
void* __libc_realloc(void* address, size_t size);
|
||||||
|
void* __libc_memalign(size_t alignment, size_t size);
|
||||||
|
void __libc_free(void* ptr);
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
using base::allocator::AllocatorDispatch;
|
||||||
|
|
||||||
|
// Strictly speaking, it would make more sense to not subtract anything, but
|
||||||
|
// other shims limit to something lower than INT_MAX (which is 0x7FFFFFFF on
|
||||||
|
// most platforms), and tests expect that.
|
||||||
|
constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
|
||||||
|
|
||||||
|
// malloc() slot of the glibc default dispatch. |context| is unused on Linux.
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
  // Cannot force glibc's malloc() to crash when a large size is requested, do
  // it in the shim instead.
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_malloc(size);
}
|
||||||
|
|
||||||
|
void* GlibcUncheckedMalloc(const AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
if (UNLIKELY(size >= kMaxAllowedSize))
|
||||||
|
return nullptr;
|
||||||
|
|
||||||
|
return __libc_malloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// calloc() slot: terminates when n * size overflows or exceeds the cap.
void* GlibcCalloc(const AllocatorDispatch*,
                  size_t n,
                  size_t size,
                  void* context) {
  const auto total = base::CheckMul(n, size);
  // NOTE(review): in the overflow case the unchecked |size * n| below wraps,
  // so the value reported to the OOM handler understates the request. This is
  // harmless (we terminate either way) but worth confirming it is intended.
  if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size * n);

  return __libc_calloc(n, size);
}
|
||||||
|
|
||||||
|
// realloc() slot: applies the same size cap as GlibcMalloc() before
// delegating to glibc.
void* GlibcRealloc(const AllocatorDispatch*,
                   void* address,
                   size_t size,
                   void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_realloc(address, size);
}
|
||||||
|
|
||||||
|
// memalign() slot: the cap applies to |size| only; |alignment| was already
// validated by the shim (see ShimPosixMemalign()).
void* GlibcMemalign(const AllocatorDispatch*,
                    size_t alignment,
                    size_t size,
                    void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_memalign(alignment, size);
}
|
||||||
|
|
||||||
|
// free() slot: straight pass-through; no size policy applies to freeing.
void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
  __libc_free(address);
}
|
||||||
|
|
||||||
|
// Size-estimate slot, backed by glibc's malloc_usable_size(). The indirect
// call target comes from dlsym(), so CFI icall checking must be disabled.
NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
  // resolve it instead. This should be safe because glibc (and hence dlfcn)
  // does not use malloc_size internally and so there should not be a risk of
  // recursion.
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  // Resolved once; C++ guarantees thread-safe initialization of this static.
  static MallocUsableSizeFunction fn_ptr =
      reinterpret_cast<MallocUsableSizeFunction>(
          dlsym(RTLD_NEXT, "malloc_usable_size"));

  // NOTE(review): if dlsym() ever failed |fn_ptr| would be null and this call
  // would crash; presumably acceptable since glibc always provides the symbol.
  return fn_ptr(address);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Terminal element of the dispatch chain on glibc builds: routes every
// primitive straight to the libc implementation. The OSX- and Windows-only
// slots are left null, as is |next| (end of chain).
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &GlibcMalloc,          /* alloc_function */
    &GlibcUncheckedMalloc, /* alloc_unchecked_function */
    &GlibcCalloc,          /* alloc_zero_initialized_function */
    &GlibcMemalign,        /* alloc_aligned_function */
    &GlibcRealloc,         /* realloc_function */
    &GlibcFree,            /* free_function */
    &GlibcGetSizeEstimate, /* get_size_estimate_function */
    nullptr,               /* batch_malloc_function */
    nullptr,               /* batch_free_function */
    nullptr,               /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};
|
@ -0,0 +1,84 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include <malloc.h>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
// This translation unit defines a default dispatch for the allocator shim which
|
||||||
|
// routes allocations to the original libc functions when using the link-time
|
||||||
|
// -Wl,-wrap,malloc approach (see README.md).
|
||||||
|
// The __real_X functions here are special symbols that the linker will relocate
|
||||||
|
// against the real "X" undefined symbol, so that __real_malloc becomes the
|
||||||
|
// equivalent of what an undefined malloc symbol reference would have been.
|
||||||
|
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
|
||||||
|
// which routes the __wrap_X functions into the shim.
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
void* __real_malloc(size_t);
|
||||||
|
void* __real_calloc(size_t, size_t);
|
||||||
|
void* __real_realloc(void*, size_t);
|
||||||
|
void* __real_memalign(size_t, size_t);
|
||||||
|
void __real_free(void*);
|
||||||
|
size_t __real_malloc_usable_size(void*);
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
using base::allocator::AllocatorDispatch;
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) malloc via the __real_ linker alias.
// |context| is unused on this platform.
void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __real_malloc(size);
}
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) calloc via the __real_ linker alias.
void* RealCalloc(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  return __real_calloc(n, size);
}
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) realloc via the __real_ linker alias.
void* RealRealloc(const AllocatorDispatch*,
                  void* address,
                  size_t size,
                  void* context) {
  return __real_realloc(address, size);
}
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) memalign via the __real_ linker alias.
void* RealMemalign(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  return __real_memalign(alignment, size);
}
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) free via the __real_ linker alias.
void RealFree(const AllocatorDispatch*, void* address, void* context) {
  __real_free(address);
}
|
||||||
|
|
||||||
|
// Forwards to the real (unwrapped) malloc_usable_size via its __real_ alias.
size_t RealSizeEstimate(const AllocatorDispatch*,
                        void* address,
                        void* context) {
  return __real_malloc_usable_size(address);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Terminal element of the dispatch chain for -Wl,-wrap builds (Android):
// every primitive goes to the corresponding __real_ symbol. RealMalloc also
// serves as the unchecked allocator, since plain malloc already reports
// failure by returning nullptr. OSX/Windows-only slots are null, as is |next|.
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &RealMalloc,       /* alloc_function */
    &RealMalloc,       /* alloc_unchecked_function */
    &RealCalloc,       /* alloc_zero_initialized_function */
    &RealMemalign,     /* alloc_aligned_function */
    &RealRealloc,      /* realloc_function */
    &RealFree,         /* free_function */
    &RealSizeEstimate, /* get_size_estimate_function */
    nullptr,           /* batch_malloc_function */
    nullptr,           /* batch_free_function */
    nullptr,           /* free_definite_size_function */
    nullptr,           /* aligned_malloc_function */
    nullptr,           /* aligned_realloc_function */
    nullptr,           /* aligned_free_function */
    nullptr,           /* next */
};
|
@ -0,0 +1,107 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_interception_mac.h"
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#include "base/allocator/malloc_zone_functions_mac.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
|
||||||
|
size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* CallocImpl(const AllocatorDispatch*,
|
||||||
|
size_t n,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
|
||||||
|
size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MemalignImpl(const AllocatorDispatch*,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
|
||||||
|
alignment, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* ReallocImpl(const AllocatorDispatch*,
|
||||||
|
void* ptr,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
|
||||||
|
ptr, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned BatchMallocImpl(const AllocatorDispatch* self,
|
||||||
|
size_t size,
|
||||||
|
void** results,
|
||||||
|
unsigned num_requested,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
return functions.batch_malloc(
|
||||||
|
reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
|
||||||
|
num_requested);
|
||||||
|
}
|
||||||
|
|
||||||
|
void BatchFreeImpl(const AllocatorDispatch* self,
|
||||||
|
void** to_be_freed,
|
||||||
|
unsigned num_to_be_freed,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
|
||||||
|
to_be_freed, num_to_be_freed);
|
||||||
|
}
|
||||||
|
|
||||||
|
void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
|
||||||
|
void* ptr,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
MallocZoneFunctions& functions = GetFunctionsForZone(context);
|
||||||
|
functions.free_definite_size(
|
||||||
|
reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
|
||||||
|
&MallocImpl, /* alloc_function */
|
||||||
|
&MallocImpl, /* alloc_unchecked_function */
|
||||||
|
&CallocImpl, /* alloc_zero_initialized_function */
|
||||||
|
&MemalignImpl, /* alloc_aligned_function */
|
||||||
|
&ReallocImpl, /* realloc_function */
|
||||||
|
&FreeImpl, /* free_function */
|
||||||
|
&GetSizeEstimateImpl, /* get_size_estimate_function */
|
||||||
|
&BatchMallocImpl, /* batch_malloc_function */
|
||||||
|
&BatchFreeImpl, /* batch_free_function */
|
||||||
|
&FreeDefiniteSizeImpl, /* free_definite_size_function */
|
||||||
|
nullptr, /* aligned_malloc_function */
|
||||||
|
nullptr, /* aligned_realloc_function */
|
||||||
|
nullptr, /* aligned_free_function */
|
||||||
|
nullptr, /* next */
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
@ -0,0 +1,810 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstddef>
|
||||||
|
#include <map>
|
||||||
|
#include <string>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/partition_alloc_features.h"
|
||||||
|
#include "base/allocator/partition_allocator/allocation_guard.h"
|
||||||
|
#include "base/allocator/partition_allocator/memory_reclaimer.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_root.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_stats.h"
|
||||||
|
#include "base/bits.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "base/feature_list.h"
|
||||||
|
#include "base/memory/nonscannable_memory.h"
|
||||||
|
#include "base/numerics/checked_math.h"
|
||||||
|
#include "base/numerics/safe_conversions.h"
|
||||||
|
#include "base/threading/platform_thread.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
#include "build/chromecast_buildflags.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
|
||||||
|
#include <malloc.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
#include <windows.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
using base::allocator::AllocatorDispatch;
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class SimpleScopedSpinLocker {
|
||||||
|
public:
|
||||||
|
explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
|
||||||
|
// Lock. Semantically equivalent to base::Lock::Acquire().
|
||||||
|
bool expected = false;
|
||||||
|
// Weak CAS since we are in a retry loop, relaxed ordering for failure since
|
||||||
|
// in this case we don't imply any ordering.
|
||||||
|
//
|
||||||
|
// This matches partition_allocator/spinning_mutex.h fast path on Linux.
|
||||||
|
while (!lock_.compare_exchange_weak(
|
||||||
|
expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
|
||||||
|
expected = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::atomic<bool>& lock_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// We can't use a "static local" or a base::LazyInstance, as:
|
||||||
|
// - static local variables call into the runtime on Windows, which is not
|
||||||
|
// prepared to handle it, as the first allocation happens during CRT init.
|
||||||
|
// - We don't want to depend on base::LazyInstance, which may be converted to
|
||||||
|
// static locals one day.
|
||||||
|
//
|
||||||
|
// Nevertheless, this provides essentially the same thing.
|
||||||
|
template <typename T, typename Constructor>
|
||||||
|
class LeakySingleton {
|
||||||
|
public:
|
||||||
|
constexpr LeakySingleton() = default;
|
||||||
|
|
||||||
|
ALWAYS_INLINE T* Get() {
|
||||||
|
auto* instance = instance_.load(std::memory_order_acquire);
|
||||||
|
if (LIKELY(instance))
|
||||||
|
return instance;
|
||||||
|
|
||||||
|
return GetSlowPath();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replaces the instance pointer with a new one.
|
||||||
|
void Replace(T* new_instance) {
|
||||||
|
SimpleScopedSpinLocker scoped_lock{initialization_lock_};
|
||||||
|
|
||||||
|
// Modify under the lock to avoid race between |if (instance)| and
|
||||||
|
// |instance_.store()| in GetSlowPath().
|
||||||
|
instance_.store(new_instance, std::memory_order_release);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
T* GetSlowPath();
|
||||||
|
|
||||||
|
std::atomic<T*> instance_;
|
||||||
|
// Before C++20, having an initializer here causes a "variable does not have a
|
||||||
|
// constant initializer" error. In C++20, omitting it causes a similar error.
|
||||||
|
// Presumably this is due to the C++20 changes to make atomic initialization
|
||||||
|
// (of the other members of this class) sane, so guarding under that
|
||||||
|
// feature-test.
|
||||||
|
#if !defined(__cpp_lib_atomic_value_initialization) || \
|
||||||
|
__cpp_lib_atomic_value_initialization < 201911L
|
||||||
|
alignas(T) uint8_t instance_buffer_[sizeof(T)];
|
||||||
|
#else
|
||||||
|
alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
|
||||||
|
#endif
|
||||||
|
std::atomic<bool> initialization_lock_;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename T, typename Constructor>
|
||||||
|
T* LeakySingleton<T, Constructor>::GetSlowPath() {
|
||||||
|
// The instance has not been set, the proper way to proceed (correct
|
||||||
|
// double-checked locking) is:
|
||||||
|
//
|
||||||
|
// auto* instance = instance_.load(std::memory_order_acquire);
|
||||||
|
// if (!instance) {
|
||||||
|
// ScopedLock initialization_lock;
|
||||||
|
// root = instance_.load(std::memory_order_relaxed);
|
||||||
|
// if (root)
|
||||||
|
// return root;
|
||||||
|
// instance = Create new root;
|
||||||
|
// instance_.store(instance, std::memory_order_release);
|
||||||
|
// return instance;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// However, we don't want to use a base::Lock here, so instead we use
|
||||||
|
// compare-and-exchange on a lock variable, which provides the same
|
||||||
|
// guarantees.
|
||||||
|
SimpleScopedSpinLocker scoped_lock{initialization_lock_};
|
||||||
|
|
||||||
|
T* instance = instance_.load(std::memory_order_relaxed);
|
||||||
|
// Someone beat us.
|
||||||
|
if (instance)
|
||||||
|
return instance;
|
||||||
|
|
||||||
|
instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
|
||||||
|
instance_.store(instance, std::memory_order_release);
|
||||||
|
|
||||||
|
return instance;
|
||||||
|
}
|
||||||
|
|
||||||
|
class MainPartitionConstructor {
|
||||||
|
public:
|
||||||
|
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
|
||||||
|
constexpr partition_alloc::PartitionOptions::ThreadCache thread_cache =
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// Additional partitions may be created in ConfigurePartitions(). Since
|
||||||
|
// only one partition can have thread cache enabled, postpone the
|
||||||
|
// decision to turn the thread cache on until after that call.
|
||||||
|
// TODO(bartekn): Enable it here by default, once the "split-only" mode
|
||||||
|
// is no longer needed.
|
||||||
|
partition_alloc::PartitionOptions::ThreadCache::kDisabled;
|
||||||
|
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// Other tests, such as the ThreadCache tests create a thread cache,
|
||||||
|
// and only one is supported at a time.
|
||||||
|
partition_alloc::PartitionOptions::ThreadCache::kDisabled;
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
|
||||||
|
partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
|
||||||
|
thread_cache,
|
||||||
|
partition_alloc::PartitionOptions::Quarantine::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::Cookie::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
|
||||||
|
});
|
||||||
|
|
||||||
|
return new_root;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
|
||||||
|
MainPartitionConstructor>
|
||||||
|
g_root CONSTINIT = {};
|
||||||
|
partition_alloc::ThreadSafePartitionRoot* Allocator() {
|
||||||
|
return g_root.Get();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Original g_root_ if it was replaced by ConfigurePartitions().
|
||||||
|
std::atomic<partition_alloc::ThreadSafePartitionRoot*> g_original_root(nullptr);
|
||||||
|
|
||||||
|
class AlignedPartitionConstructor {
|
||||||
|
public:
|
||||||
|
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
|
||||||
|
return g_root.Get();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
|
||||||
|
AlignedPartitionConstructor>
|
||||||
|
g_aligned_root CONSTINIT = {};
|
||||||
|
|
||||||
|
partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
|
||||||
|
return g_original_root.load(std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
partition_alloc::ThreadSafePartitionRoot* AlignedAllocator() {
|
||||||
|
return g_aligned_root.Get();
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
bool IsRunning32bitEmulatedOnArm64() {
|
||||||
|
using IsWow64Process2Function = decltype(&IsWow64Process2);
|
||||||
|
|
||||||
|
IsWow64Process2Function is_wow64_process2 =
|
||||||
|
reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
|
||||||
|
::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
|
||||||
|
if (!is_wow64_process2)
|
||||||
|
return false;
|
||||||
|
USHORT process_machine;
|
||||||
|
USHORT native_machine;
|
||||||
|
bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
|
||||||
|
&native_machine);
|
||||||
|
if (!retval)
|
||||||
|
return false;
|
||||||
|
if (native_machine == IMAGE_FILE_MACHINE_ARM64)
|
||||||
|
return true;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
// The number of bytes to add to every allocation. Ordinarily zero, but set to 8
|
||||||
|
// when emulating an x86 on ARM64 to avoid a bug in the Windows x86 emulator.
|
||||||
|
size_t g_extra_bytes;
|
||||||
|
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
|
||||||
|
// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
|
||||||
|
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
|
||||||
|
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
|
||||||
|
#else // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
return size;
|
||||||
|
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
|
||||||
|
}
|
||||||
|
|
||||||
|
void* AllocateAlignedMemory(size_t alignment, size_t size) {
|
||||||
|
// Memory returned by the regular allocator *always* respects |kAlignment|,
|
||||||
|
// which is a power of two, and any valid alignment is also a power of two. So
|
||||||
|
// we can directly fulfill these requests with the main allocator.
|
||||||
|
//
|
||||||
|
// This has several advantages:
|
||||||
|
// - The thread cache is supported on the main partition
|
||||||
|
// - Reduced fragmentation
|
||||||
|
// - Better coverage for MiraclePtr variants requiring extras
|
||||||
|
//
|
||||||
|
// There are several call sites in Chromium where base::AlignedAlloc is called
|
||||||
|
// with a small alignment. Some may be due to overly-careful code, some are
|
||||||
|
// because the client code doesn't know the required alignment at compile
|
||||||
|
// time.
|
||||||
|
//
|
||||||
|
// Note that all "AlignedFree()" variants (_aligned_free() on Windows for
|
||||||
|
// instance) directly call PartitionFree(), so there is no risk of
|
||||||
|
// mismatch. (see below the default_dispatch definition).
|
||||||
|
if (alignment <= partition_alloc::internal::kAlignment) {
|
||||||
|
// This is mandated by |posix_memalign()| and friends, so should never fire.
|
||||||
|
PA_CHECK(base::bits::IsPowerOfTwo(alignment));
|
||||||
|
// TODO(bartekn): See if the compiler optimizes branches down the stack on
|
||||||
|
// Mac, where PartitionPageSize() isn't constexpr.
|
||||||
|
return Allocator()->AllocWithFlagsNoHooks(
|
||||||
|
0, size, partition_alloc::PartitionPageSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
return AlignedAllocator()->AlignedAllocWithFlags(
|
||||||
|
partition_alloc::AllocFlags::kNoHooks, alignment, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
unsigned int g_alloc_flags = 0;
|
||||||
|
#else
|
||||||
|
constexpr unsigned int g_alloc_flags = 0;
|
||||||
|
#endif
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
// We generally prefer to always crash rather than returning nullptr for
|
||||||
|
// OOM. However, on some macOS releases, we have to locally allow it due to
|
||||||
|
// weirdness in OS code. See https://crbug.com/654695 for details.
|
||||||
|
//
|
||||||
|
// Apple only since it's not needed elsewhere, and there is a performance
|
||||||
|
// penalty.
|
||||||
|
|
||||||
|
if (value)
|
||||||
|
g_alloc_flags = 0;
|
||||||
|
else
|
||||||
|
g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
return Allocator()->AllocWithFlagsNoHooks(
|
||||||
|
g_alloc_flags, MaybeAdjustSize(size),
|
||||||
|
partition_alloc::PartitionPageSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionMallocUnchecked(const AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
return Allocator()->AllocWithFlagsNoHooks(
|
||||||
|
partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
|
||||||
|
MaybeAdjustSize(size), partition_alloc::PartitionPageSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionCalloc(const AllocatorDispatch*,
|
||||||
|
size_t n,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
|
||||||
|
return Allocator()->AllocWithFlagsNoHooks(
|
||||||
|
partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
|
||||||
|
partition_alloc::PartitionPageSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionMemalign(const AllocatorDispatch*,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
return AllocateAlignedMemory(alignment, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
return AllocateAlignedMemory(alignment, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// aligned_realloc documentation is
|
||||||
|
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
|
||||||
|
// TODO(tasak): Expand the given memory block to the given size if possible.
|
||||||
|
// This realloc always free the original memory block and allocates a new memory
|
||||||
|
// block.
|
||||||
|
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
|
||||||
|
// and use it.
|
||||||
|
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
void* new_ptr = nullptr;
|
||||||
|
if (size > 0) {
|
||||||
|
size = MaybeAdjustSize(size);
|
||||||
|
new_ptr = AllocateAlignedMemory(alignment, size);
|
||||||
|
} else {
|
||||||
|
// size == 0 and address != null means just "free(address)".
|
||||||
|
if (address)
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
|
||||||
|
}
|
||||||
|
// The original memory block (specified by address) is unchanged if ENOMEM.
|
||||||
|
if (!new_ptr)
|
||||||
|
return nullptr;
|
||||||
|
// TODO(tasak): Need to compare the new alignment with the address' alignment.
|
||||||
|
// If the two alignments are not the same, need to return nullptr with EINVAL.
|
||||||
|
if (address) {
|
||||||
|
size_t usage =
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
|
||||||
|
size_t copy_size = usage > size ? size : usage;
|
||||||
|
memcpy(new_ptr, address, copy_size);
|
||||||
|
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
|
||||||
|
}
|
||||||
|
return new_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* PartitionRealloc(const AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
|
||||||
|
reinterpret_cast<uintptr_t>(address)) &&
|
||||||
|
address)) {
|
||||||
|
// A memory region allocated by the system allocator is passed in this
|
||||||
|
// function. Forward the request to `realloc` which supports zone-
|
||||||
|
// dispatching so that it appropriately selects the right zone.
|
||||||
|
return realloc(address, size);
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
return Allocator()->ReallocWithFlags(
|
||||||
|
partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address,
|
||||||
|
MaybeAdjustSize(size), "");
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_CAST_ANDROID)
|
||||||
|
extern "C" {
|
||||||
|
void __real_free(void*);
|
||||||
|
} // extern "C"
|
||||||
|
#endif // BUILDFLAG(IS_CAST_ANDROID)
|
||||||
|
|
||||||
|
void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
// TODO(bartekn): Add MTE unmasking here (and below).
|
||||||
|
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
|
||||||
|
reinterpret_cast<uintptr_t>(object)) &&
|
||||||
|
object)) {
|
||||||
|
// A memory region allocated by the system allocator is passed in this
|
||||||
|
// function. Forward the request to `free` which supports zone-
|
||||||
|
// dispatching so that it appropriately selects the right zone.
|
||||||
|
return free(object);
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
// On Android Chromecast devices, there is at least one case where a system
|
||||||
|
// malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
|
||||||
|
// the pointer, pass it along. This should not have a runtime cost vs regular
|
||||||
|
// Android, since on Android we have a PA_CHECK() rather than the branch here.
|
||||||
|
#if BUILDFLAG(IS_CAST_ANDROID)
|
||||||
|
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
|
||||||
|
reinterpret_cast<uintptr_t>(object)) &&
|
||||||
|
object)) {
|
||||||
|
// A memory region allocated by the system allocator is passed in this
|
||||||
|
// function. Forward the request to `free()`, which is `__real_free()`
|
||||||
|
// here.
|
||||||
|
return __real_free(object);
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_CAST_ANDROID)
|
||||||
|
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(object);
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
// Normal free() path on Apple OSes:
|
||||||
|
// 1. size = GetSizeEstimate(ptr);
|
||||||
|
// 2. if (size) FreeDefiniteSize(ptr, size)
|
||||||
|
//
|
||||||
|
// So we don't need to re-check that the pointer is owned in Free(), and we
|
||||||
|
// can use the size.
|
||||||
|
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ScopedDisallowAllocations guard{};
|
||||||
|
// TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
|
||||||
|
// still useful though, as we avoid double-checking that the address is owned.
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
// This is used to implement malloc_usable_size(3). Per its man page, "if ptr
|
||||||
|
// is NULL, 0 is returned".
|
||||||
|
if (!address)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
if (!partition_alloc::IsManagedByPartitionAlloc(
|
||||||
|
reinterpret_cast<uintptr_t>(address))) {
|
||||||
|
// The object pointed to by `address` is not allocated by the
|
||||||
|
// PartitionAlloc. The return value `0` means that the pointer does not
|
||||||
|
// belong to this malloc zone.
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
// TODO(lizeb): Returns incorrect values for aligned allocations.
|
||||||
|
const size_t size =
|
||||||
|
partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
// The object pointed to by `address` is allocated by the PartitionAlloc.
|
||||||
|
// So, this function must not return zero so that the malloc zone dispatcher
|
||||||
|
// finds the appropriate malloc zone.
|
||||||
|
PA_DCHECK(size);
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned PartitionBatchMalloc(const AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void** results,
|
||||||
|
unsigned num_requested,
|
||||||
|
void* context) {
|
||||||
|
// No real batching: we could only acquire the lock once for instance, keep it
|
||||||
|
// simple for now.
|
||||||
|
for (unsigned i = 0; i < num_requested; i++) {
|
||||||
|
// No need to check the results, we crash if it fails.
|
||||||
|
results[i] = PartitionMalloc(nullptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Either all succeeded, or we crashed.
|
||||||
|
return num_requested;
|
||||||
|
}
|
||||||
|
|
||||||
|
void PartitionBatchFree(const AllocatorDispatch*,
|
||||||
|
void** to_be_freed,
|
||||||
|
unsigned num_to_be_freed,
|
||||||
|
void* context) {
|
||||||
|
// No real batching: we could only acquire the lock once for instance, keep it
|
||||||
|
// simple for now.
|
||||||
|
for (unsigned i = 0; i < num_to_be_freed; i++) {
|
||||||
|
PartitionFree(nullptr, to_be_freed[i], nullptr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// static
|
||||||
|
partition_alloc::ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
|
||||||
|
return ::Allocator();
|
||||||
|
}
|
||||||
|
|
||||||
|
// static
|
||||||
|
partition_alloc::ThreadSafePartitionRoot*
|
||||||
|
PartitionAllocMalloc::OriginalAllocator() {
|
||||||
|
return ::OriginalAllocator();
|
||||||
|
}
|
||||||
|
|
||||||
|
// static
|
||||||
|
partition_alloc::ThreadSafePartitionRoot*
|
||||||
|
PartitionAllocMalloc::AlignedAllocator() {
|
||||||
|
return ::AlignedAllocator();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
void EnablePartitionAllocMemoryReclaimer() {
|
||||||
|
// Unlike other partitions, Allocator() and AlignedAllocator() do not register
|
||||||
|
// their PartitionRoots to the memory reclaimer, because doing so may allocate
|
||||||
|
// memory. Thus, the registration to the memory reclaimer has to be done
|
||||||
|
// some time later, when the main root is fully configured.
|
||||||
|
// TODO(bartekn): Aligned allocator can use the regular initialization path.
|
||||||
|
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
|
||||||
|
Allocator());
|
||||||
|
auto* original_root = OriginalAllocator();
|
||||||
|
if (original_root)
|
||||||
|
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
|
||||||
|
original_root);
|
||||||
|
if (AlignedAllocator() != Allocator()) {
|
||||||
|
::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
|
||||||
|
AlignedAllocator());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
|
||||||
|
g_allocator_buffer_for_new_main_partition[sizeof(
|
||||||
|
partition_alloc::ThreadSafePartitionRoot)];
|
||||||
|
|
||||||
|
alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
|
||||||
|
g_allocator_buffer_for_aligned_alloc_partition[sizeof(
|
||||||
|
partition_alloc::ThreadSafePartitionRoot)];
|
||||||
|
|
||||||
|
void ConfigurePartitions(
|
||||||
|
EnableBrp enable_brp,
|
||||||
|
EnableBrpZapping enable_brp_zapping,
|
||||||
|
SplitMainPartition split_main_partition,
|
||||||
|
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||||
|
AlternateBucketDistribution use_alternate_bucket_distribution) {
|
||||||
|
// BRP cannot be enabled without splitting the main partition. Furthermore, in
|
||||||
|
// the "before allocation" mode, it can't be enabled without further splitting
|
||||||
|
// out the aligned partition.
|
||||||
|
PA_CHECK(!enable_brp || split_main_partition);
|
||||||
|
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||||
|
PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
|
||||||
|
#endif
|
||||||
|
// Can't split out the aligned partition, without splitting the main one.
|
||||||
|
PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);
|
||||||
|
|
||||||
|
static bool configured = false;
|
||||||
|
PA_CHECK(!configured);
|
||||||
|
configured = true;
|
||||||
|
|
||||||
|
// Calling Get() is actually important, even if the return values weren't
|
||||||
|
// used, because it has a side effect of initializing the variables, if they
|
||||||
|
// weren't already.
|
||||||
|
auto* current_root = g_root.Get();
|
||||||
|
auto* current_aligned_root = g_aligned_root.Get();
|
||||||
|
|
||||||
|
if (!split_main_partition) {
|
||||||
|
if (!use_alternate_bucket_distribution) {
|
||||||
|
current_root->SwitchToDenserBucketDistribution();
|
||||||
|
current_aligned_root->SwitchToDenserBucketDistribution();
|
||||||
|
}
|
||||||
|
PA_DCHECK(!enable_brp);
|
||||||
|
PA_DCHECK(!use_dedicated_aligned_partition);
|
||||||
|
PA_DCHECK(!current_root->flags.with_thread_cache);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto* new_root = new (g_allocator_buffer_for_new_main_partition)
|
||||||
|
partition_alloc::ThreadSafePartitionRoot({
|
||||||
|
!use_dedicated_aligned_partition
|
||||||
|
? partition_alloc::PartitionOptions::AlignedAlloc::kAllowed
|
||||||
|
: partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
|
||||||
|
partition_alloc::PartitionOptions::ThreadCache::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::Quarantine::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::Cookie::kAllowed,
|
||||||
|
enable_brp
|
||||||
|
? partition_alloc::PartitionOptions::BackupRefPtr::kEnabled
|
||||||
|
: partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
|
||||||
|
enable_brp_zapping
|
||||||
|
? partition_alloc::PartitionOptions::BackupRefPtrZapping::kEnabled
|
||||||
|
: partition_alloc::PartitionOptions::BackupRefPtrZapping::
|
||||||
|
kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
|
||||||
|
});
|
||||||
|
|
||||||
|
partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
|
||||||
|
if (use_dedicated_aligned_partition) {
|
||||||
|
// TODO(bartekn): Use the original root instead of creating a new one. It'd
|
||||||
|
// result in one less partition, but come at a cost of commingling types.
|
||||||
|
new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
|
||||||
|
partition_alloc::ThreadSafePartitionRoot({
|
||||||
|
partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::ThreadCache::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::Quarantine::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::Cookie::kAllowed,
|
||||||
|
partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
|
||||||
|
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// The new main root can also support AlignedAlloc.
|
||||||
|
new_aligned_root = new_root;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now switch traffic to the new partitions.
|
||||||
|
g_aligned_root.Replace(new_aligned_root);
|
||||||
|
g_root.Replace(new_root);
|
||||||
|
|
||||||
|
// g_original_root has to be set after g_root, because other code doesn't
|
||||||
|
// handle well both pointing to the same root.
|
||||||
|
// TODO(bartekn): Reorder, once handled well. It isn't ideal for one
|
||||||
|
// partition to be invisible temporarily.
|
||||||
|
g_original_root = current_root;
|
||||||
|
|
||||||
|
// No need for g_original_aligned_root, because in cases where g_aligned_root
|
||||||
|
// is replaced, it must've been g_original_root.
|
||||||
|
PA_CHECK(current_aligned_root == g_original_root);
|
||||||
|
|
||||||
|
// Purge memory, now that the traffic to the original partition is cut off.
|
||||||
|
current_root->PurgeMemory(
|
||||||
|
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
|
||||||
|
partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);
|
||||||
|
|
||||||
|
if (!use_alternate_bucket_distribution) {
|
||||||
|
g_root.Get()->SwitchToDenserBucketDistribution();
|
||||||
|
g_aligned_root.Get()->SwitchToDenserBucketDistribution();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
|
||||||
|
partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
|
||||||
|
&::base::PlatformThread::SetName);
|
||||||
|
partition_alloc::internal::PCScan::Initialize(config);
|
||||||
|
|
||||||
|
partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
|
||||||
|
if (OriginalAllocator() != nullptr)
|
||||||
|
partition_alloc::internal::PCScan::RegisterScannableRoot(
|
||||||
|
OriginalAllocator());
|
||||||
|
if (Allocator() != AlignedAllocator())
|
||||||
|
partition_alloc::internal::PCScan::RegisterScannableRoot(
|
||||||
|
AlignedAllocator());
|
||||||
|
|
||||||
|
internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
|
||||||
|
internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
|
||||||
|
}
|
||||||
|
#endif // defined(PA_ALLOW_PCSCAN)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
// Call this as soon as possible during startup.
|
||||||
|
void ConfigurePartitionAlloc() {
|
||||||
|
#if defined(ARCH_CPU_X86)
|
||||||
|
if (IsRunning32bitEmulatedOnArm64())
|
||||||
|
g_extra_bytes = 8;
|
||||||
|
#endif // defined(ARCH_CPU_X86)
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_WIN)
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
|
||||||
|
&base::internal::PartitionMalloc, // alloc_function
|
||||||
|
&base::internal::PartitionMallocUnchecked, // alloc_unchecked_function
|
||||||
|
&base::internal::PartitionCalloc, // alloc_zero_initialized_function
|
||||||
|
&base::internal::PartitionMemalign, // alloc_aligned_function
|
||||||
|
&base::internal::PartitionRealloc, // realloc_function
|
||||||
|
&base::internal::PartitionFree, // free_function
|
||||||
|
&base::internal::PartitionGetSizeEstimate, // get_size_estimate_function
|
||||||
|
&base::internal::PartitionBatchMalloc, // batch_malloc_function
|
||||||
|
&base::internal::PartitionBatchFree, // batch_free_function
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
// On Apple OSes, free_definite_size() is always called from free(), since
|
||||||
|
// get_size_estimate() is used to determine whether an allocation belongs to
|
||||||
|
// the current zone. It makes sense to optimize for it.
|
||||||
|
&base::internal::PartitionFreeDefiniteSize,
|
||||||
|
#else
|
||||||
|
nullptr, // free_definite_size_function
|
||||||
|
#endif
|
||||||
|
&base::internal::PartitionAlignedAlloc, // aligned_malloc_function
|
||||||
|
&base::internal::PartitionAlignedRealloc, // aligned_realloc_function
|
||||||
|
&base::internal::PartitionFree, // aligned_free_function
|
||||||
|
nullptr, // next
|
||||||
|
};
|
||||||
|
|
||||||
|
// Intercept diagnostics symbols as well, even though they are not part of the
|
||||||
|
// unified shim layer.
|
||||||
|
//
|
||||||
|
// TODO(lizeb): Implement the ones that doable.
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
|
||||||
|
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
|
||||||
|
partition_alloc::SimplePartitionStatsDumper allocator_dumper;
|
||||||
|
Allocator()->DumpStats("malloc", true, &allocator_dumper);
|
||||||
|
// TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
|
||||||
|
|
||||||
|
partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
|
||||||
|
if (AlignedAllocator() != Allocator()) {
|
||||||
|
AlignedAllocator()->DumpStats("posix_memalign", true,
|
||||||
|
&aligned_allocator_dumper);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump stats for nonscannable and nonquarantinable allocators.
|
||||||
|
auto& nonscannable_allocator =
|
||||||
|
base::internal::NonScannableAllocator::Instance();
|
||||||
|
partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
|
||||||
|
if (auto* nonscannable_root = nonscannable_allocator.root())
|
||||||
|
nonscannable_root->DumpStats("malloc", true,
|
||||||
|
&nonscannable_allocator_dumper);
|
||||||
|
auto& nonquarantinable_allocator =
|
||||||
|
base::internal::NonQuarantinableAllocator::Instance();
|
||||||
|
partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
|
||||||
|
if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
|
||||||
|
nonquarantinable_root->DumpStats("malloc", true,
|
||||||
|
&nonquarantinable_allocator_dumper);
|
||||||
|
|
||||||
|
struct mallinfo info = {0};
|
||||||
|
info.arena = 0; // Memory *not* allocated with mmap().
|
||||||
|
|
||||||
|
// Memory allocated with mmap(), aka virtual size.
|
||||||
|
info.hblks = base::checked_cast<decltype(info.hblks)>(
|
||||||
|
allocator_dumper.stats().total_mmapped_bytes +
|
||||||
|
aligned_allocator_dumper.stats().total_mmapped_bytes +
|
||||||
|
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
|
||||||
|
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
|
||||||
|
// Resident bytes.
|
||||||
|
info.hblkhd = base::checked_cast<decltype(info.hblkhd)>(
|
||||||
|
allocator_dumper.stats().total_resident_bytes +
|
||||||
|
aligned_allocator_dumper.stats().total_resident_bytes +
|
||||||
|
nonscannable_allocator_dumper.stats().total_resident_bytes +
|
||||||
|
nonquarantinable_allocator_dumper.stats().total_resident_bytes);
|
||||||
|
// Allocated bytes.
|
||||||
|
info.uordblks = base::checked_cast<decltype(info.uordblks)>(
|
||||||
|
allocator_dumper.stats().total_active_bytes +
|
||||||
|
aligned_allocator_dumper.stats().total_active_bytes +
|
||||||
|
nonscannable_allocator_dumper.stats().total_active_bytes +
|
||||||
|
nonquarantinable_allocator_dumper.stats().total_active_bytes);
|
||||||
|
|
||||||
|
return info;
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
|
||||||
|
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
void InitializeDefaultAllocatorPartitionRoot() {
|
||||||
|
// On OS_APPLE, the initialization of PartitionRoot uses memory allocations
|
||||||
|
// internally, e.g. __builtin_available, and it's not easy to avoid it.
|
||||||
|
// Thus, we initialize the PartitionRoot with using the system default
|
||||||
|
// allocator before we intercept the system default allocator.
|
||||||
|
std::ignore = Allocator();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
@ -0,0 +1,75 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#include "base/base_export.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);
|
||||||
|
|
||||||
|
class BASE_EXPORT PartitionAllocMalloc {
|
||||||
|
public:
|
||||||
|
static partition_alloc::ThreadSafePartitionRoot* Allocator();
|
||||||
|
// May return |nullptr|, will never return the same pointer as |Allocator()|.
|
||||||
|
static partition_alloc::ThreadSafePartitionRoot* OriginalAllocator();
|
||||||
|
// May return the same pointer as |Allocator()|.
|
||||||
|
static partition_alloc::ThreadSafePartitionRoot* AlignedAllocator();
|
||||||
|
};
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionMallocUnchecked(
|
||||||
|
const base::allocator::AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
|
||||||
|
size_t n,
|
||||||
|
size_t size,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionAlignedAlloc(
|
||||||
|
const base::allocator::AllocatorDispatch* dispatch,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionAlignedRealloc(
|
||||||
|
const base::allocator::AllocatorDispatch* dispatch,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
|
||||||
|
void* object,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
BASE_EXPORT size_t
|
||||||
|
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
void* context);
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
|
106
src/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
Normal file
106
src/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
|
||||||
|
#include <ostream>
|
||||||
|
|
||||||
|
#include "base/allocator/winheap_stubs_win.h"
|
||||||
|
#include "base/check.h"
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
using base::allocator::AllocatorDispatch;
|
||||||
|
|
||||||
|
void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
return base::allocator::WinHeapMalloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
|
||||||
|
size_t n,
|
||||||
|
size_t elem_size,
|
||||||
|
void* context) {
|
||||||
|
// Overflow check.
|
||||||
|
const size_t size = n * elem_size;
|
||||||
|
if (elem_size != 0 && size / elem_size != n)
|
||||||
|
return nullptr;
|
||||||
|
|
||||||
|
void* result = DefaultWinHeapMallocImpl(self, size, context);
|
||||||
|
if (result) {
|
||||||
|
memset(result, 0, size);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
CHECK(false) << "The windows heap does not support memalign.";
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
return base::allocator::WinHeapRealloc(address, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
base::allocator::WinHeapFree(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
return base::allocator::WinHeapGetSizeEstimate(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
return base::allocator::WinHeapAlignedMalloc(size, alignment);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
|
||||||
|
void* ptr,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
|
||||||
|
void* ptr,
|
||||||
|
void* context) {
|
||||||
|
base::allocator::WinHeapAlignedFree(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Guarantee that default_dispatch is compile-time initialized to avoid using
|
||||||
|
// it before initialization (allocations before main in release builds with
|
||||||
|
// optimizations disabled).
|
||||||
|
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
|
||||||
|
&DefaultWinHeapMallocImpl,
|
||||||
|
&DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
|
||||||
|
&DefaultWinHeapCallocImpl,
|
||||||
|
&DefaultWinHeapMemalignImpl,
|
||||||
|
&DefaultWinHeapReallocImpl,
|
||||||
|
&DefaultWinHeapFreeImpl,
|
||||||
|
&DefaultWinHeapGetSizeEstimateImpl,
|
||||||
|
nullptr, /* batch_malloc_function */
|
||||||
|
nullptr, /* batch_free_function */
|
||||||
|
nullptr, /* free_definite_size_function */
|
||||||
|
&DefaultWinHeapAlignedMallocImpl,
|
||||||
|
&DefaultWinHeapAlignedReallocImpl,
|
||||||
|
&DefaultWinHeapAlignedFreeImpl,
|
||||||
|
nullptr, /* next */
|
||||||
|
};
|
53
src/base/allocator/allocator_shim_internals.h
Normal file
53
src/base/allocator/allocator_shim_internals.h
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
|
||||||
|
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if defined(__GNUC__)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_POSIX)
|
||||||
|
#include <sys/cdefs.h> // for __THROW
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef __THROW // Not a glibc system
|
||||||
|
#ifdef _NOEXCEPT // LLVM libc++ uses noexcept instead
|
||||||
|
#define __THROW _NOEXCEPT
|
||||||
|
#else
|
||||||
|
#define __THROW
|
||||||
|
#endif // !_NOEXCEPT
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
|
||||||
|
//
|
||||||
|
// If an exported symbol is linked into a DSO, it may be preempted by a
|
||||||
|
// definition in the main executable. If this happens to an allocator symbol, it
|
||||||
|
// will mean that the DSO will use the main executable's allocator. This is
|
||||||
|
// normally relatively harmless -- regular allocations should all use the same
|
||||||
|
// allocator, but if the DSO tries to hook the allocator it will not see any
|
||||||
|
// allocations.
|
||||||
|
//
|
||||||
|
// However, if LLVM LTO is enabled, the compiler may inline the shim layer
|
||||||
|
// symbols into callers. The end result is that allocator calls in DSOs may use
|
||||||
|
// either the main executable's allocator or the DSO's allocator, depending on
|
||||||
|
// whether the call was inlined. This is arguably a bug in LLVM caused by its
|
||||||
|
// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
|
||||||
|
// To work around the bug we use noinline to prevent the symbols from being
|
||||||
|
// inlined.
|
||||||
|
//
|
||||||
|
// In the long run we probably want to avoid linking the allocator bits into
|
||||||
|
// DSOs altogether. This will save a little space and stop giving DSOs the false
|
||||||
|
// impression that they can hook the allocator.
|
||||||
|
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_WIN) // __GNUC__
|
||||||
|
|
||||||
|
#define __THROW
|
||||||
|
#define SHIM_ALWAYS_EXPORT __declspec(noinline)
|
||||||
|
|
||||||
|
#endif // __GNUC__
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
|
130
src/base/allocator/allocator_shim_override_cpp_symbols.h
Normal file
130
src/base/allocator/allocator_shim_override_cpp_symbols.h
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
|
||||||
|
|
||||||
|
// Preempt the default new/delete C++ symbols so they call the shim entry
|
||||||
|
// points. This file is strongly inspired by tcmalloc's
|
||||||
|
// libc_override_redefine.h.
|
||||||
|
|
||||||
|
#include <new>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if !BUILDFLAG(IS_APPLE)
|
||||||
|
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
|
||||||
|
#else
|
||||||
|
// On Apple OSes, prefer not exporting these symbols (as this reverts to the
|
||||||
|
// default behavior, they are still exported in e.g. component builds). This is
|
||||||
|
// partly due to intentional limits on exported symbols in the main library, but
|
||||||
|
// it is also needless, since no library used on macOS imports these.
|
||||||
|
//
|
||||||
|
// TODO(lizeb): It may not be necessary anywhere to export these.
|
||||||
|
#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
|
||||||
|
return ShimCppNew(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
|
||||||
|
return ShimCppNew(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
return ShimCppNewNoThrow(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
return ShimCppNewNoThrow(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
|
||||||
|
std::align_val_t alignment) {
|
||||||
|
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
|
||||||
|
std::align_val_t alignment,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
|
||||||
|
std::align_val_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
|
||||||
|
std::size_t size,
|
||||||
|
std::align_val_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
|
||||||
|
std::align_val_t,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
|
||||||
|
std::align_val_t alignment) {
|
||||||
|
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
|
||||||
|
std::align_val_t alignment,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
|
||||||
|
std::align_val_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
|
||||||
|
std::size_t size,
|
||||||
|
std::align_val_t) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
|
||||||
|
std::align_val_t,
|
||||||
|
const std::nothrow_t&) __THROW {
|
||||||
|
ShimCppDelete(p);
|
||||||
|
}
|
119
src/base/allocator/allocator_shim_override_glibc_weak_symbols.h
Normal file
119
src/base/allocator/allocator_shim_override_glibc_weak_symbols.h
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
|
||||||
|
|
||||||
|
// Alias the internal Glibc symbols to the shim entry points.
|
||||||
|
// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
|
||||||
|
// Effectively this file does two things:
|
||||||
|
// 1) Re-define the __malloc_hook & co symbols. Those symbols are defined as
|
||||||
|
// weak in glibc and are meant to be defined strongly by client processes
|
||||||
|
// to hook calls initiated from within glibc.
|
||||||
|
// 2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
|
||||||
|
// is that in the past (in RedHat 9) we had instances of libraries that were
|
||||||
|
// allocating via malloc() and freeing using __libc_free().
|
||||||
|
// See tcmalloc's libc_override_glibc.h for more context.
|
||||||
|
|
||||||
|
#include <features.h> // for __GLIBC__
|
||||||
|
#include <malloc.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
|
||||||
|
#include <new>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
|
||||||
|
// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
|
||||||
|
#if !defined(__MALLOC_HOOK_VOLATILE)
|
||||||
|
#define MALLOC_HOOK_MAYBE_VOLATILE /**/
|
||||||
|
#else
|
||||||
|
#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
// 1) Re-define malloc_hook weak symbols.
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
void* GlibcMallocHook(size_t size, const void* caller) {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
|
||||||
|
return ShimRealloc(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void GlibcFreeHook(void* ptr, const void* caller) {
|
||||||
|
return ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
|
||||||
|
return ShimMemalign(align, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
__attribute__((visibility("default"))) void* (
|
||||||
|
*MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
|
||||||
|
const void*) = &GlibcMallocHook;
|
||||||
|
|
||||||
|
__attribute__((visibility("default"))) void* (
|
||||||
|
*MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
|
||||||
|
&GlibcReallocHook;
|
||||||
|
|
||||||
|
__attribute__((visibility("default"))) void (
|
||||||
|
*MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
|
||||||
|
const void*) = &GlibcFreeHook;
|
||||||
|
|
||||||
|
__attribute__((visibility("default"))) void* (
|
||||||
|
*MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
|
||||||
|
&GlibcMemalignHook;
|
||||||
|
|
||||||
|
// 2) Redefine libc symbols themselves.
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
|
||||||
|
ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
|
||||||
|
return ShimRealloc(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
|
||||||
|
return ShimCalloc(n, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
|
||||||
|
return ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
|
||||||
|
return ShimMemalign(align, s, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
|
||||||
|
return ShimValloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
|
||||||
|
return ShimPvalloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
|
||||||
|
return ShimPosixMemalign(r, a, s);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
// Safety check.
|
||||||
|
#if !defined(__GLIBC__)
|
||||||
|
#error The target platform does not seem to use Glibc. Disable the allocator \
|
||||||
|
shim by setting use_allocator_shim=false in GN args.
|
||||||
|
#endif
|
88
src/base/allocator/allocator_shim_override_libc_symbols.h
Normal file
88
src/base/allocator/allocator_shim_override_libc_symbols.h
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// Its purpose is to preempt the Libc symbols for malloc/new so they call the
|
||||||
|
// shim layer entry points.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
|
||||||
|
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#include <malloc/malloc.h>
|
||||||
|
#else
|
||||||
|
#include <malloc.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
// WARNING: Whenever a new function is added there (which, surprisingly enough,
|
||||||
|
// happens. For instance glibc 2.33 introduced mallinfo2(), which we don't
|
||||||
|
// support... yet?), it MUST be added to build/linux/chrome.map.
|
||||||
|
//
|
||||||
|
// Otherwise the new symbol is not exported from Chromium's main binary, which
|
||||||
|
// is necessary to override libc's weak symbol, which in turn is necessary to
|
||||||
|
// intercept calls made by dynamic libraries. See crbug.com/1292206 for such
|
||||||
|
// an example.
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
|
||||||
|
ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
|
||||||
|
return ShimRealloc(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
|
||||||
|
return ShimCalloc(n, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
|
||||||
|
ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
|
||||||
|
return ShimMemalign(align, s, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
|
||||||
|
return ShimMemalign(align, s, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
|
||||||
|
return ShimValloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
|
||||||
|
return ShimPvalloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
|
||||||
|
return ShimPosixMemalign(r, a, s);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
|
||||||
|
return ShimGetSizeEstimate(address, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
|
||||||
|
return ShimGetSizeEstimate(address, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default dispatch translation unit has to define also the following
|
||||||
|
// symbols (unless they are ultimately routed to the system symbols):
|
||||||
|
// void malloc_stats(void);
|
||||||
|
// int mallopt(int, int);
|
||||||
|
// struct mallinfo mallinfo(void);
|
||||||
|
|
||||||
|
} // extern "C"
|
@ -0,0 +1,154 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
|
||||||
|
|
||||||
|
// This header overrides the __wrap_X symbols when using the link-time
|
||||||
|
// -Wl,-wrap,malloc shim-layer approach (see README.md).
|
||||||
|
// All references to malloc, free, etc. within the linker unit that gets the
|
||||||
|
// -wrap linker flags (e.g., libchrome.so) will be rewritten to the
|
||||||
|
// linker as references to __wrap_malloc, __wrap_free, which are defined here.
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
|
||||||
|
return ShimCalloc(n, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
|
||||||
|
ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
|
||||||
|
return ShimMemalign(align, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
|
||||||
|
size_t align,
|
||||||
|
size_t size) {
|
||||||
|
return ShimPosixMemalign(res, align, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
|
||||||
|
return ShimPvalloc(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
|
||||||
|
return ShimRealloc(address, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
|
||||||
|
return ShimValloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT size_t __wrap_malloc_usable_size(void* address) {
|
||||||
|
return ShimGetSizeEstimate(address, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
const size_t kPathMaxSize = 8192;
|
||||||
|
static_assert(kPathMaxSize >= PATH_MAX, "");
|
||||||
|
|
||||||
|
extern char* __wrap_strdup(const char* str);
|
||||||
|
|
||||||
|
// Override <stdlib.h>
|
||||||
|
|
||||||
|
extern char* __real_realpath(const char* path, char* resolved_path);
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
|
||||||
|
char* resolved_path) {
|
||||||
|
if (resolved_path)
|
||||||
|
return __real_realpath(path, resolved_path);
|
||||||
|
|
||||||
|
char buffer[kPathMaxSize];
|
||||||
|
if (!__real_realpath(path, buffer))
|
||||||
|
return nullptr;
|
||||||
|
return __wrap_strdup(buffer);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override <string.h> functions
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
|
||||||
|
std::size_t length = std::strlen(str) + 1;
|
||||||
|
void* buffer = ShimMalloc(length, nullptr);
|
||||||
|
if (!buffer)
|
||||||
|
return nullptr;
|
||||||
|
return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
|
||||||
|
std::size_t length = std::min(std::strlen(str), n);
|
||||||
|
char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
|
||||||
|
if (!buffer)
|
||||||
|
return nullptr;
|
||||||
|
std::memcpy(buffer, str, length);
|
||||||
|
buffer[length] = '\0';
|
||||||
|
return buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override <unistd.h>
|
||||||
|
|
||||||
|
extern char* __real_getcwd(char* buffer, size_t size);
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
|
||||||
|
if (buffer)
|
||||||
|
return __real_getcwd(buffer, size);
|
||||||
|
|
||||||
|
if (!size)
|
||||||
|
size = kPathMaxSize;
|
||||||
|
char local_buffer[size];
|
||||||
|
if (!__real_getcwd(local_buffer, size))
|
||||||
|
return nullptr;
|
||||||
|
return __wrap_strdup(local_buffer);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override stdio.h
|
||||||
|
|
||||||
|
// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
|
||||||
|
// Android, and used by libc++.
|
||||||
|
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
|
||||||
|
const char* fmt,
|
||||||
|
va_list va_args) {
|
||||||
|
constexpr int kInitialSize = 128;
|
||||||
|
*strp = static_cast<char*>(
|
||||||
|
malloc(kInitialSize)); // Our malloc() doesn't return nullptr.
|
||||||
|
|
||||||
|
int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
|
||||||
|
if (actual_size < 0)
|
||||||
|
return actual_size;
|
||||||
|
*strp =
|
||||||
|
static_cast<char*>(realloc(*strp, static_cast<size_t>(actual_size + 1)));
|
||||||
|
|
||||||
|
// Now we know the size. This is not very efficient, but we cannot really do
|
||||||
|
// better without accessing internal libc functions, or reimplementing
|
||||||
|
// *printf().
|
||||||
|
//
|
||||||
|
// This is very lightly used in Chromium in practice, see crbug.com/116558 for
|
||||||
|
// details.
|
||||||
|
if (actual_size >= kInitialSize)
|
||||||
|
return vsnprintf(*strp, static_cast<size_t>(actual_size + 1), fmt, va_args);
|
||||||
|
|
||||||
|
return actual_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
|
||||||
|
va_list va_args;
|
||||||
|
va_start(va_args, fmt);
|
||||||
|
int retval = vasprintf(strp, fmt, va_args);
|
||||||
|
va_end(va_args);
|
||||||
|
return retval;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // extern "C"
|
377
src/base/allocator/allocator_shim_override_mac_default_zone.h
Normal file
377
src/base/allocator/allocator_shim_override_mac_default_zone.h
Normal file
@ -0,0 +1,377 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
|
||||||
|
|
||||||
|
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base/allocator/early_zone_registration_mac.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/bits.h"
|
||||||
|
#include "base/logging.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// Defined in base/allocator/partition_allocator/partition_root.cc
|
||||||
|
void PartitionAllocMallocHookOnBeforeForkInParent();
|
||||||
|
void PartitionAllocMallocHookOnAfterForkInParent();
|
||||||
|
void PartitionAllocMallocHookOnAfterForkInChild();
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
namespace base::allocator {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// malloc_introspection_t's callback functions for our own zone
|
||||||
|
|
||||||
|
kern_return_t MallocIntrospectionEnumerator(task_t task,
|
||||||
|
void*,
|
||||||
|
unsigned type_mask,
|
||||||
|
vm_address_t zone_address,
|
||||||
|
memory_reader_t reader,
|
||||||
|
vm_range_recorder_t recorder) {
|
||||||
|
// Should enumerate all memory regions allocated by this allocator, but not
|
||||||
|
// implemented just because of no use case for now.
|
||||||
|
return KERN_FAILURE;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
|
||||||
|
return base::bits::AlignUp(size, partition_alloc::internal::kAlignment);
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
|
||||||
|
// Should check the consistency of the allocator implementing this malloc
|
||||||
|
// zone, but not implemented just because of no use case for now.
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
|
||||||
|
// Should print the current states of the zone for debugging / investigation
|
||||||
|
// purpose, but not implemented just because of no use case for now.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
|
||||||
|
// Should enable logging of the activities on the given `address`, but not
|
||||||
|
// implemented just because of no use case for now.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionForceLock(malloc_zone_t* zone) {
|
||||||
|
// Called before fork(2) to acquire the lock.
|
||||||
|
partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
|
||||||
|
// Called in the parent process after fork(2) to release the lock.
|
||||||
|
partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionStatistics(malloc_zone_t* zone,
|
||||||
|
malloc_statistics_t* stats) {
|
||||||
|
// Should report the memory usage correctly, but not implemented just because
|
||||||
|
// of no use case for now.
|
||||||
|
stats->blocks_in_use = 0;
|
||||||
|
stats->size_in_use = 0;
|
||||||
|
stats->max_size_in_use = 0; // High water mark of touched memory
|
||||||
|
stats->size_allocated = 0; // Reserved in memory
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
|
||||||
|
// Should return true if the underlying PartitionRoot is locked, but not
|
||||||
|
// implemented just because this function seems not used effectively.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
|
||||||
|
// 'discharge' is not supported.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
|
||||||
|
// 'discharge' is not supported.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
|
||||||
|
// 'discharge' is not supported.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionEnumerateDischargedPointers(
|
||||||
|
malloc_zone_t* zone,
|
||||||
|
void (^report_discharged)(void* memory, void* info)) {
|
||||||
|
// 'discharge' is not supported.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
|
||||||
|
// Called in a child process after fork(2) to re-initialize the lock.
|
||||||
|
partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionPrintTask(task_t task,
|
||||||
|
unsigned level,
|
||||||
|
vm_address_t zone_address,
|
||||||
|
memory_reader_t reader,
|
||||||
|
print_task_printer_t printer) {
|
||||||
|
// Should print the current states of another process's zone for debugging /
|
||||||
|
// investigation purpose, but not implemented just because of no use case
|
||||||
|
// for now.
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocIntrospectionTaskStatistics(task_t task,
|
||||||
|
vm_address_t zone_address,
|
||||||
|
memory_reader_t reader,
|
||||||
|
malloc_statistics_t* stats) {
|
||||||
|
// Should report the memory usage in another process's zone, but not
|
||||||
|
// implemented just because of no use case for now.
|
||||||
|
stats->blocks_in_use = 0;
|
||||||
|
stats->size_in_use = 0;
|
||||||
|
stats->max_size_in_use = 0; // High water mark of touched memory
|
||||||
|
stats->size_allocated = 0; // Reserved in memory
|
||||||
|
}
|
||||||
|
|
||||||
|
// malloc_zone_t's callback functions for our own zone
|
||||||
|
|
||||||
|
size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
|
||||||
|
return ShimGetSizeEstimate(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
|
||||||
|
return ShimCalloc(n, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
|
||||||
|
return ShimValloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
|
||||||
|
return ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
|
||||||
|
return ShimRealloc(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocZoneDestroy(malloc_zone_t* zone) {
|
||||||
|
// No support to destroy the zone for now.
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
|
||||||
|
return ShimMemalign(alignment, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
|
||||||
|
return ShimFreeDefiniteSize(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
|
||||||
|
size_t size,
|
||||||
|
void** results,
|
||||||
|
unsigned num_requested) {
|
||||||
|
return ShimBatchMalloc(size, results, num_requested, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MallocZoneBatchFree(malloc_zone_t* zone,
|
||||||
|
void** to_be_freed,
|
||||||
|
unsigned num) {
|
||||||
|
return ShimBatchFree(to_be_freed, num, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
malloc_introspection_t g_mac_malloc_introspection{};
|
||||||
|
malloc_zone_t g_mac_malloc_zone{};
|
||||||
|
|
||||||
|
malloc_zone_t* GetDefaultMallocZone() {
|
||||||
|
// malloc_default_zone() does not return... the default zone, but the initial
|
||||||
|
// one. The default one is the first element of the default zone array.
|
||||||
|
unsigned int zone_count = 0;
|
||||||
|
vm_address_t* zones = nullptr;
|
||||||
|
kern_return_t result =
|
||||||
|
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
|
||||||
|
MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
|
||||||
|
return reinterpret_cast<malloc_zone_t*>(zones[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool IsAlreadyRegistered() {
|
||||||
|
// HACK: This should really only be called once, but it is not.
|
||||||
|
//
|
||||||
|
// This function is a static constructor of its binary. If it is included in a
|
||||||
|
// dynamic library, then the same process may end up executing this code
|
||||||
|
// multiple times, once per library. As a consequence, each new library will
|
||||||
|
// add its own allocator as the default zone. Aside from splitting the heap
|
||||||
|
// further, the main issue arises if/when the last library to be loaded
|
||||||
|
// (dlopen()-ed) gets dlclose()-ed.
|
||||||
|
//
|
||||||
|
// See crbug.com/1271139 for details.
|
||||||
|
//
|
||||||
|
// In this case, subsequent free() will be routed by libmalloc to the deleted
|
||||||
|
// zone (since its code has been unloaded from memory), and crash inside
|
||||||
|
// libsystem's free(). This in practice happens as soon as dlclose() is
|
||||||
|
// called, inside the dynamic linker (dyld).
|
||||||
|
//
|
||||||
|
// Since we are talking about different library, and issues inside the dynamic
|
||||||
|
// linker, we cannot use a global static variable (which would be
|
||||||
|
// per-library), or anything from pthread.
|
||||||
|
//
|
||||||
|
// The solution used here is to check whether the current default zone is
|
||||||
|
// already ours, in which case we are not the first dynamic library here, and
|
||||||
|
// should do nothing. This is racy, and hacky.
|
||||||
|
vm_address_t* zones = nullptr;
|
||||||
|
unsigned int zone_count = 0;
|
||||||
|
// *Not* using malloc_default_zone(), as it seems to be hardcoded to return
|
||||||
|
// something else than the default zone. See the difference between
|
||||||
|
// malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
|
||||||
|
// (in libmalloc).
|
||||||
|
kern_return_t result =
|
||||||
|
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
|
||||||
|
MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
|
||||||
|
// Checking all the zones, in case someone registered their own zone on top of
|
||||||
|
// our own.
|
||||||
|
for (unsigned int i = 0; i < zone_count; i++) {
|
||||||
|
malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
|
||||||
|
|
||||||
|
// strcmp() and not a pointer comparison, as the zone was registered from
|
||||||
|
// another library, the pointers don't match.
|
||||||
|
if (zone->zone_name &&
|
||||||
|
(strcmp(zone->zone_name, partition_alloc::kPartitionAllocZoneName) ==
|
||||||
|
0)) {
|
||||||
|
// This zone is provided by PartitionAlloc, so this function has been
|
||||||
|
// called from another library (or the main executable), nothing to do.
|
||||||
|
//
|
||||||
|
// This should be a crash, ideally, but callers do it, so only warn, for
|
||||||
|
// now.
|
||||||
|
RAW_LOG(ERROR,
|
||||||
|
"Trying to load the allocator multiple times. This is *not* "
|
||||||
|
"supported.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void InitializeZone() {
|
||||||
|
g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
|
||||||
|
g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
|
||||||
|
g_mac_malloc_introspection.check = MallocIntrospectionCheck;
|
||||||
|
g_mac_malloc_introspection.print = MallocIntrospectionPrint;
|
||||||
|
g_mac_malloc_introspection.log = MallocIntrospectionLog;
|
||||||
|
g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
|
||||||
|
g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
|
||||||
|
g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
|
||||||
|
g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
|
||||||
|
g_mac_malloc_introspection.enable_discharge_checking =
|
||||||
|
MallocIntrospectionEnableDischargeChecking;
|
||||||
|
g_mac_malloc_introspection.disable_discharge_checking =
|
||||||
|
MallocIntrospectionDisableDischargeChecking;
|
||||||
|
g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
|
||||||
|
g_mac_malloc_introspection.enumerate_discharged_pointers =
|
||||||
|
MallocIntrospectionEnumerateDischargedPointers;
|
||||||
|
g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
|
||||||
|
g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
|
||||||
|
g_mac_malloc_introspection.task_statistics =
|
||||||
|
MallocIntrospectionTaskStatistics;
|
||||||
|
// `version` member indicates which APIs are supported in this zone.
|
||||||
|
// version >= 5: memalign is supported
|
||||||
|
// version >= 6: free_definite_size is supported
|
||||||
|
// version >= 7: introspect's discharge family is supported
|
||||||
|
// version >= 8: pressure_relief is supported
|
||||||
|
// version >= 9: introspect.reinit_lock is supported
|
||||||
|
// version >= 10: claimed_address is supported
|
||||||
|
// version >= 11: introspect.print_task is supported
|
||||||
|
// version >= 12: introspect.task_statistics is supported
|
||||||
|
g_mac_malloc_zone.version = partition_alloc::kZoneVersion;
|
||||||
|
g_mac_malloc_zone.zone_name = partition_alloc::kPartitionAllocZoneName;
|
||||||
|
g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
|
||||||
|
g_mac_malloc_zone.size = MallocZoneSize;
|
||||||
|
g_mac_malloc_zone.malloc = MallocZoneMalloc;
|
||||||
|
g_mac_malloc_zone.calloc = MallocZoneCalloc;
|
||||||
|
g_mac_malloc_zone.valloc = MallocZoneValloc;
|
||||||
|
g_mac_malloc_zone.free = MallocZoneFree;
|
||||||
|
g_mac_malloc_zone.realloc = MallocZoneRealloc;
|
||||||
|
g_mac_malloc_zone.destroy = MallocZoneDestroy;
|
||||||
|
g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
|
||||||
|
g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
|
||||||
|
g_mac_malloc_zone.memalign = MallocZoneMemalign;
|
||||||
|
g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
|
||||||
|
g_mac_malloc_zone.pressure_relief = nullptr;
|
||||||
|
g_mac_malloc_zone.claimed_address = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replaces the default malloc zone with our own malloc zone backed by
|
||||||
|
// PartitionAlloc. Since we'd like to make as much code as possible to use our
|
||||||
|
// own memory allocator (and reduce bugs caused by mixed use of the system
|
||||||
|
// allocator and our own allocator), run the following function
|
||||||
|
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
|
||||||
|
//
|
||||||
|
// Note that, despite of the highest priority of the initialization order,
|
||||||
|
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
|
||||||
|
// unfortunately and allocates memory with the system allocator. Plus, the
|
||||||
|
// allocated memory will be deallocated with the default zone's `free` at that
|
||||||
|
// moment without using a zone dispatcher. Hence, our own `free` function
|
||||||
|
// receives an address allocated by the system allocator.
|
||||||
|
__attribute__((constructor(0))) void
|
||||||
|
InitializeDefaultMallocZoneWithPartitionAlloc() {
|
||||||
|
if (IsAlreadyRegistered())
|
||||||
|
return;
|
||||||
|
|
||||||
|
// Instantiate the existing regular and purgeable zones in order to make the
|
||||||
|
// existing purgeable zone use the existing regular zone since PartitionAlloc
|
||||||
|
// doesn't support a purgeable zone.
|
||||||
|
std::ignore = malloc_default_zone();
|
||||||
|
std::ignore = malloc_default_purgeable_zone();
|
||||||
|
|
||||||
|
// Initialize the default allocator's PartitionRoot with the existing zone.
|
||||||
|
InitializeDefaultAllocatorPartitionRoot();
|
||||||
|
|
||||||
|
// Create our own malloc zone.
|
||||||
|
InitializeZone();
|
||||||
|
|
||||||
|
malloc_zone_t* system_default_zone = GetDefaultMallocZone();
|
||||||
|
if (strcmp(system_default_zone->zone_name,
|
||||||
|
partition_alloc::kDelegatingZoneName) == 0) {
|
||||||
|
// The first zone is our zone, we can unregister it, replacing it with the
|
||||||
|
// new one. This relies on a precise zone setup, done in
|
||||||
|
// |EarlyMallocZoneRegistration()|.
|
||||||
|
malloc_zone_register(&g_mac_malloc_zone);
|
||||||
|
malloc_zone_unregister(system_default_zone);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not in the path where the zone was registered early. This is either racy,
|
||||||
|
// or fine if the current process is not hosting multiple threads.
|
||||||
|
//
|
||||||
|
// This path is fine for e.g. most unit tests.
|
||||||
|
//
|
||||||
|
// Make our own zone the default zone.
|
||||||
|
//
|
||||||
|
// Put our own zone at the last position, so that it promotes to the default
|
||||||
|
// zone. The implementation logic of malloc_zone_unregister is:
|
||||||
|
// zone_table.swap(unregistered_zone, last_zone);
|
||||||
|
// zone_table.shrink_size_by_1();
|
||||||
|
malloc_zone_register(&g_mac_malloc_zone);
|
||||||
|
malloc_zone_unregister(system_default_zone);
|
||||||
|
// Between malloc_zone_unregister(system_default_zone) (above) and
|
||||||
|
// malloc_zone_register(system_default_zone) (below), i.e. while absence of
|
||||||
|
// system_default_zone, it's possible that another thread calls free(ptr) and
|
||||||
|
// "no zone found" error is hit, crashing the process.
|
||||||
|
malloc_zone_register(system_default_zone);
|
||||||
|
|
||||||
|
// Confirm that our own zone is now the default zone.
|
||||||
|
CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
} // namespace base::allocator
|
60
src/base/allocator/allocator_shim_override_mac_symbols.h
Normal file
60
src/base/allocator/allocator_shim_override_mac_symbols.h
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
|
||||||
|
#error This header is meant to be included only once by allocator_shim.cc
|
||||||
|
#endif
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
|
||||||
|
|
||||||
|
#include "base/allocator/malloc_zone_functions_mac.h"
|
||||||
|
#include "third_party/apple_apsl/malloc.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
|
||||||
|
MallocZoneFunctions new_functions;
|
||||||
|
memset(&new_functions, 0, sizeof(MallocZoneFunctions));
|
||||||
|
new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
|
||||||
|
return ShimGetSizeEstimate(ptr, zone);
|
||||||
|
};
|
||||||
|
new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
|
||||||
|
return ShimMalloc(size, zone);
|
||||||
|
};
|
||||||
|
new_functions.calloc = [](malloc_zone_t* zone, size_t n,
|
||||||
|
size_t size) -> void* {
|
||||||
|
return ShimCalloc(n, size, zone);
|
||||||
|
};
|
||||||
|
new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
|
||||||
|
return ShimValloc(size, zone);
|
||||||
|
};
|
||||||
|
new_functions.free = [](malloc_zone_t* zone, void* ptr) {
|
||||||
|
ShimFree(ptr, zone);
|
||||||
|
};
|
||||||
|
new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
|
||||||
|
size_t size) -> void* {
|
||||||
|
return ShimRealloc(ptr, size, zone);
|
||||||
|
};
|
||||||
|
new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
|
||||||
|
void** results,
|
||||||
|
unsigned num_requested) -> unsigned {
|
||||||
|
return ShimBatchMalloc(size, results, num_requested, zone);
|
||||||
|
};
|
||||||
|
new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
|
||||||
|
unsigned num_to_be_freed) -> void {
|
||||||
|
ShimBatchFree(to_be_freed, num_to_be_freed, zone);
|
||||||
|
};
|
||||||
|
new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
|
||||||
|
size_t size) -> void* {
|
||||||
|
return ShimMemalign(alignment, size, zone);
|
||||||
|
};
|
||||||
|
new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
|
||||||
|
size_t size) {
|
||||||
|
ShimFreeDefiniteSize(ptr, size, zone);
|
||||||
|
};
|
||||||
|
return new_functions;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
178
src/base/allocator/allocator_shim_override_ucrt_symbols_win.h
Normal file
178
src/base/allocator/allocator_shim_override_ucrt_symbols_win.h
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// This header defines symbols to override the same functions in the Visual C++
|
||||||
|
// CRT implementation.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
|
||||||
|
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
|
||||||
|
|
||||||
|
#include <malloc.h>
|
||||||
|
|
||||||
|
#include <windows.h>
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_internals.h"
|
||||||
|
|
||||||
|
// Even though most C++ allocation operators can be left alone since the
|
||||||
|
// interception works at a lower level, these ones should be
|
||||||
|
// overridden. Otherwise they redirect to malloc(), which is configured to crash
|
||||||
|
// with an OOM in failure cases, such as allocation requests that are too large.
|
||||||
|
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
|
||||||
|
const std::nothrow_t&) noexcept {
|
||||||
|
return ShimCppNewNoThrow(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
|
||||||
|
const std::nothrow_t&) noexcept {
|
||||||
|
return ShimCppNewNoThrow(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
|
||||||
|
void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
int win_new_mode = 0;
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// This function behaves similarly to MSVC's _set_new_mode.
|
||||||
|
// If flag is 0 (default), calls to malloc will behave normally.
|
||||||
|
// If flag is 1, calls to malloc will behave like calls to new,
|
||||||
|
// and the std_new_handler will be invoked on failure.
|
||||||
|
// Returns the previous mode.
|
||||||
|
//
|
||||||
|
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
|
||||||
|
int _set_new_mode(int flag) {
|
||||||
|
// The MS CRT calls this function early on in startup, so this serves as a low
|
||||||
|
// overhead proof that the allocator shim is in place for this process.
|
||||||
|
base::allocator::g_is_win_shim_layer_initialized = true;
|
||||||
|
int old_mode = win_new_mode;
|
||||||
|
win_new_mode = flag;
|
||||||
|
|
||||||
|
base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
|
||||||
|
|
||||||
|
return old_mode;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
|
||||||
|
int _query_new_mode() {
|
||||||
|
return win_new_mode;
|
||||||
|
}
|
||||||
|
|
||||||
|
// These symbols override the CRT's implementation of the same functions.
|
||||||
|
__declspec(restrict) void* malloc(size_t size) {
|
||||||
|
return ShimMalloc(size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void free(void* ptr) {
|
||||||
|
ShimFree(ptr, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
__declspec(restrict) void* realloc(void* ptr, size_t size) {
|
||||||
|
return ShimRealloc(ptr, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
__declspec(restrict) void* calloc(size_t n, size_t size) {
|
||||||
|
return ShimCalloc(n, size, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// _msize() is the Windows equivalent of malloc_size().
|
||||||
|
size_t _msize(void* memblock) {
|
||||||
|
return ShimGetSizeEstimate(memblock, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
|
||||||
|
return ShimAlignedMalloc(size, alignment, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
__declspec(restrict) void* _aligned_realloc(void* address,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment) {
|
||||||
|
return ShimAlignedRealloc(address, size, alignment, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void _aligned_free(void* address) {
|
||||||
|
ShimAlignedFree(address, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
// _recalloc_base is called by CRT internally.
|
||||||
|
__declspec(restrict) void* _recalloc_base(void* block,
|
||||||
|
size_t count,
|
||||||
|
size_t size) {
|
||||||
|
const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
|
||||||
|
base::CheckedNumeric<size_t> new_block_size_checked = count;
|
||||||
|
new_block_size_checked *= size;
|
||||||
|
const size_t new_block_size = new_block_size_checked.ValueOrDie();
|
||||||
|
|
||||||
|
void* const new_block = realloc(block, new_block_size);
|
||||||
|
|
||||||
|
if (new_block != nullptr && old_block_size < new_block_size) {
|
||||||
|
memset(static_cast<char*>(new_block) + old_block_size, 0,
|
||||||
|
new_block_size - old_block_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
return new_block;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The *_base entry points are the CRT-internal spellings of the public
// allocation functions; route them through the overrides above so that
// internal CRT allocations also go through the shim.
__declspec(restrict) void* _malloc_base(size_t size) {
  return malloc(size);
}

__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
  return calloc(n, size);
}

void _free_base(void* block) {
  free(block);
}

// Public _recalloc simply delegates to the shared _recalloc_base above.
__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
  return _recalloc_base(block, count, size);
}
|
||||||
|
|
||||||
|
// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added if
// uses are introduced.
// Each stub CHECKs unconditionally; __builtin_unreachable() tells the
// compiler the function never returns, silencing missing-return warnings.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
|
||||||
|
|
||||||
|
} // extern "C"
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
|
24
src/base/allocator/dispatcher/configuration.h
Normal file
24
src/base/allocator/dispatcher/configuration.h
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_

#include <cstddef>

// Compile-time limits for the allocation dispatcher's observer lists.
// `inline constexpr` (C++17) gives each constant a single program-wide
// entity instead of one internal-linkage copy per translation unit.
namespace base::allocator::dispatcher::configuration {

// The maximum number of optional observers that may be present depending on
// command line parameters.
inline constexpr size_t kMaximumNumberOfOptionalObservers = 4;

// The total number of observers including mandatory and optional observers.
// Primarily the number of observers affects the performance at allocation time.
// The current value of 4 doesn't have hard evidence. Keep in mind that
// also a single observer can severely impact performance.
inline constexpr size_t kMaximumNumberOfObservers = 4;

}  // namespace base::allocator::dispatcher::configuration

#endif  // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
|
341
src/base/allocator/dispatcher/dispatcher.cc
Normal file
341
src/base/allocator/dispatcher/dispatcher.cc
Normal file
@ -0,0 +1,341 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/dispatcher/dispatcher.h"
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/dispatcher/internal/dispatch_data.h"
|
||||||
|
#include "base/allocator/dispatcher/reentry_guard.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#include "base/check.h"
|
||||||
|
#include "base/dcheck_is_on.h"
|
||||||
|
#include "base/no_destructor.h"
|
||||||
|
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
|
||||||
|
|
||||||
|
#if DCHECK_IS_ON()
|
||||||
|
#include <atomic>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
// Allocator-shim hooks: each forwards the request to the next dispatch in
// the chain, then reports the outcome to PoissonAllocationSampler. Sampling
// is skipped when the ReentryGuard evaluates false — presumably indicating a
// re-entrant call from within the sampler itself; confirm against
// reentry_guard.h.
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {

using allocator::AllocatorDispatch;

// Plain allocation: forward, then record the returned address/size.
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
  ReentryGuard guard;
  void* address = self->next->alloc_function(self->next, size, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

// Like AllocFn but for the unchecked (non-terminating-on-failure) variant.
void* AllocUncheckedFn(const AllocatorDispatch* self,
                       size_t size,
                       void* context) {
  ReentryGuard guard;
  void* address =
      self->next->alloc_unchecked_function(self->next, size, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

// calloc-style allocation; the recorded size is the full n * size extent.
void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                             size_t n,
                             size_t size,
                             void* context) {
  ReentryGuard guard;
  void* address =
      self->next->alloc_zero_initialized_function(self->next, n, size, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, n * size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

// Aligned allocation; only the size (not the alignment) is recorded.
void* AllocAlignedFn(const AllocatorDispatch* self,
                     size_t alignment,
                     size_t size,
                     void* context) {
  ReentryGuard guard;
  void* address =
      self->next->alloc_aligned_function(self->next, alignment, size, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

// realloc: recorded as a free of the old block followed by an alloc of the
// new one (the block may move).
void* ReallocFn(const AllocatorDispatch* self,
                void* address,
                size_t size,
                void* context) {
  ReentryGuard guard;
  // Note: size == 0 actually performs free.
  PoissonAllocationSampler::RecordFree(address);
  address = self->next->realloc_function(self->next, address, size, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
  // Note: The RecordFree should be called before free_function
  // (here and in other places).
  // That is because we need to remove the recorded allocation sample before
  // free_function, as once the latter is executed the address becomes available
  // and can be allocated by another thread. That would be racy otherwise.
  PoissonAllocationSampler::RecordFree(address);
  self->next->free_function(self->next, address, context);
}

// Pure pass-through: size queries do not affect sampling state.
size_t GetSizeEstimateFn(const AllocatorDispatch* self,
                         void* address,
                         void* context) {
  return self->next->get_size_estimate_function(self->next, address, context);
}

// Batch allocation: record one sample per successfully allocated slot.
unsigned BatchMallocFn(const AllocatorDispatch* self,
                       size_t size,
                       void** results,
                       unsigned num_requested,
                       void* context) {
  ReentryGuard guard;
  unsigned num_allocated = self->next->batch_malloc_function(
      self->next, size, results, num_requested, context);
  if (LIKELY(guard)) {
    for (unsigned i = 0; i < num_allocated; ++i) {
      PoissonAllocationSampler::RecordAlloc(
          results[i], size, PoissonAllocationSampler::kMalloc, nullptr);
    }
  }
  return num_allocated;
}

// Batch free: record all frees first (see the ordering note in FreeFn).
void BatchFreeFn(const AllocatorDispatch* self,
                 void** to_be_freed,
                 unsigned num_to_be_freed,
                 void* context) {
  for (unsigned i = 0; i < num_to_be_freed; ++i)
    PoissonAllocationSampler::RecordFree(to_be_freed[i]);
  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
                                  context);
}

// free with a caller-known size; ordering as in FreeFn.
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
                        void* address,
                        size_t size,
                        void* context) {
  PoissonAllocationSampler::RecordFree(address);
  self->next->free_definite_size_function(self->next, address, size, context);
}

static void* AlignedMallocFn(const AllocatorDispatch* self,
                             size_t size,
                             size_t alignment,
                             void* context) {
  ReentryGuard guard;
  void* address =
      self->next->aligned_malloc_function(self->next, size, alignment, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

// Aligned realloc: free-then-alloc bookkeeping, mirroring ReallocFn.
static void* AlignedReallocFn(const AllocatorDispatch* self,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  ReentryGuard guard;
  // Note: size == 0 actually performs free.
  PoissonAllocationSampler::RecordFree(address);
  address = self->next->aligned_realloc_function(self->next, address, size,
                                                 alignment, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}

static void AlignedFreeFn(const AllocatorDispatch* self,
                          void* address,
                          void* context) {
  PoissonAllocationSampler::RecordFree(address);
  self->next->aligned_free_function(self->next, address, context);
}

// The dispatch table inserted into the shim chain; the trailing nullptr is
// the `next` link, filled in when the dispatch is inserted.
AllocatorDispatch g_allocator_dispatch = {&AllocFn,
                                          &AllocUncheckedFn,
                                          &AllocZeroInitializedFn,
                                          &AllocAlignedFn,
                                          &ReallocFn,
                                          &FreeFn,
                                          &GetSizeEstimateFn,
                                          &BatchMallocFn,
                                          &BatchFreeFn,
                                          &FreeDefiniteSizeFn,
                                          &AlignedMallocFn,
                                          &AlignedReallocFn,
                                          &AlignedFreeFn,
                                          nullptr};

}  // namespace
}  // namespace base::allocator::dispatcher::allocator_shim_details
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
// PartitionAlloc observer hooks: report PartitionAlloc activity to the
// PoissonAllocationSampler, tagged with kPartitionAlloc and the allocation's
// type name.
namespace base::allocator::dispatcher::partition_allocator_details {
namespace {

void PartitionAllocHook(void* address, size_t size, const char* type) {
  PoissonAllocationSampler::RecordAlloc(
      address, size, PoissonAllocationSampler::kPartitionAlloc, type);
}

void PartitionFreeHook(void* address) {
  PoissonAllocationSampler::RecordFree(address);
}

}  // namespace
}  // namespace base::allocator::dispatcher::partition_allocator_details
#endif  // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher {

// Inserts the standard sampling hooks into every available memory subsystem:
// the allocator shim (if built in) and PartitionAlloc (if built in, non-NaCl).
void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  allocator::InsertAllocatorDispatch(
      &allocator_shim_details::g_allocator_dispatch);
#else
  // If the allocator shim isn't available, then we don't install any hooks.
  // There's no point in printing an error message, since this can regularly
  // happen for tests.
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
  partition_alloc::PartitionAllocHooks::SetObserverHooks(
      &partition_allocator_details::PartitionAllocHook,
      &partition_allocator_details::PartitionFreeHook);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
}

// Undoes InstallStandardAllocatorHooks(). Test-only because removing a
// dispatch from the shim chain is not generally safe in production (see the
// ForTesting-suffixed shim API).
void RemoveStandardAllocatorHooksForTesting() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  allocator::RemoveAllocatorDispatchForTesting(
      &allocator_shim_details::g_allocator_dispatch);  // IN-TEST
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
  partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}

}  // namespace base::allocator::dispatcher
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher {

// The private implementation of Dispatcher.
struct Dispatcher::Impl {
  // Stores the hooks and connects them to the memory subsystems. Must be
  // called at most once; double-initialization is caught in DCHECK builds.
  void Initialize(const internal::DispatchData& dispatch_data) {
#if DCHECK_IS_ON()
    // test_and_set() both checks and marks initialization. The side effect is
    // safe inside DCHECK because the whole statement only exists when
    // DCHECK_IS_ON().
    DCHECK(!is_initialized_check_flag_.test_and_set());
#endif

    dispatch_data_ = dispatch_data;
    ConnectToEmitters(dispatch_data_);
  }

  // Disconnects the hooks and clears the stored dispatch data. In DCHECK
  // builds, verifies Initialize() had been called and re-arms the flag so a
  // later Initialize() is allowed again.
  void Reset() {
#if DCHECK_IS_ON()
    DCHECK([&]() {
      auto const was_set = is_initialized_check_flag_.test();
      is_initialized_check_flag_.clear();
      return was_set;
    }());
#endif

    DisconnectFromEmitters(dispatch_data_);
    dispatch_data_ = {};
  }

 private:
  // Connect the hooks to the memory subsystem. In some cases, most notably when
  // we have no observers at all, the hooks will be invalid and must NOT be
  // connected. This way we prevent notifications although no observers are
  // present.
  static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator::InsertAllocatorDispatch(allocator_dispatch);
    }
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC)
    {
      auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
      auto* const free_hook = dispatch_data.GetFreeObserverHook();
      // Both hooks must be present; PartitionAlloc takes them as a pair.
      if (allocation_hook && free_hook) {
        partition_alloc::PartitionAllocHooks::SetObserverHooks(allocation_hook,
                                                               free_hook);
      }
    }
#endif
  }

  // Reverse of ConnectToEmitters(). Shim removal uses the ForTesting API —
  // see RemoveStandardAllocatorHooksForTesting() for the rationale.
  static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
    if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
      allocator::RemoveAllocatorDispatchForTesting(
          allocator_dispatch);  // IN-TEST
    }
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC)
    partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
  }

  // Information on the hooks.
  internal::DispatchData dispatch_data_;
#if DCHECK_IS_ON()
  // Indicator if the dispatcher has been initialized before.
  // Pre-C++20 std::atomic_flag needs ATOMIC_FLAG_INIT for a defined initial
  // state; the feature-test macro selects the correct form per toolchain.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
#else
  std::atomic_flag is_initialized_check_flag_;
#endif
#endif
};

Dispatcher::Dispatcher() : impl_(std::make_unique<Impl>()) {}

Dispatcher::~Dispatcher() = default;

// Process-wide singleton; NoDestructor avoids a destructor at exit.
Dispatcher& Dispatcher::GetInstance() {
  static base::NoDestructor<Dispatcher> instance;
  return *instance;
}

void Dispatcher::Initialize(const internal::DispatchData& dispatch_data) {
  impl_->Initialize(dispatch_data);
}

void Dispatcher::ResetForTesting() {
  impl_->Reset();
}
}  // namespace base::allocator::dispatcher
|
78
src/base/allocator/dispatcher/dispatcher.h
Normal file
78
src/base/allocator/dispatcher/dispatcher.h
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"

#include <memory>

namespace base::allocator::dispatcher {

// Install/remove the default sampling hooks (allocator shim and, where
// available, PartitionAlloc) — defined in dispatcher.cc.
void BASE_EXPORT InstallStandardAllocatorHooks();
void BASE_EXPORT RemoveStandardAllocatorHooksForTesting();

namespace internal {
struct DispatchData;
}

// Dispatcher serves as the top level instance for managing the dispatch
// mechanism. The class instance manages connections to the various memory
// subsystems such as PartitionAlloc. To keep the public interface as lean as
// possible it uses a pimpl pattern.
class BASE_EXPORT Dispatcher {
 public:
  static Dispatcher& GetInstance();

  Dispatcher();

  // Initialize the dispatch mechanism with the given tuple of observers. The
  // observers must be valid (it is only DCHECKed internally at initialization,
  // but not verified further)
  // If Initialize is called multiple times, the first one wins. All later
  // invocations are silently ignored. Initialization is protected from
  // concurrent invocations. In case of concurrent accesses, the first one to
  // get the lock wins.
  // The dispatcher invokes following functions on the observers:
  // void OnAllocation(void* address,
  //                   size_t size,
  //                   AllocationSubsystem sub_system,
  //                   const char* type_name);
  // void OnFree(void* address);
  //
  // Note: The dispatcher mechanism does NOT bring systematic protection against
  // recursive invocations. That is, observers which allocate memory on the
  // heap, i.e. through dynamically allocated containers or by using the
  // CHECK-macro, are responsible to break these recursions!
  template <typename... ObserverTypes>
  void Initialize(const std::tuple<ObserverTypes...>& observers) {
    // Get the hooks for running these observers and pass them to further
    // initialization
    Initialize(internal::GetNotificationHooks(observers));
  }

  // The following functions provide an interface to setup and tear down the
  // dispatcher when testing. This must NOT be used from production code since
  // the hooks cannot be removed reliably under all circumstances.
  template <typename ObserverType>
  void InitializeForTesting(ObserverType* observer) {
    Initialize(std::make_tuple(observer));
  }

  void ResetForTesting();

 private:
  // structure and pointer to the private implementation.
  struct Impl;
  std::unique_ptr<Impl> const impl_;

  // Destructor is private: instances are only destroyed by the class itself
  // (the singleton is held in a base::NoDestructor in dispatcher.cc).
  ~Dispatcher();

  // Non-template initialization step shared by the templated overloads above.
  void Initialize(const internal::DispatchData& dispatch_data);
};
}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
|
206
src/base/allocator/dispatcher/initializer.h
Normal file
206
src/base/allocator/dispatcher/initializer.h
Normal file
@ -0,0 +1,206 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_

#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"

#include <tuple>
#include <utility>

namespace base::allocator::dispatcher {
namespace internal {

// Filter the passed observers and perform initialization of the passed
// dispatcher.
// Compile-time recursion over `unverified_observers`: IndicesToSelect
// accumulates the indices of observers that pass `check_observer`; once the
// end of the tuple is reached, the selected observers are appended to
// `verified_observers` and handed to the dispatcher.
template <size_t CurrentIndex,
          typename DispatcherType,
          typename CheckObserverPredicate,
          typename VerifiedObservers,
          typename UnverifiedObservers,
          size_t... IndicesToSelect>
inline void DoInitialize(DispatcherType& dispatcher,
                         CheckObserverPredicate check_observer,
                         const VerifiedObservers& verified_observers,
                         const UnverifiedObservers& unverified_observers,
                         std::index_sequence<IndicesToSelect...> indices) {
  if constexpr (CurrentIndex < std::tuple_size<UnverifiedObservers>::value) {
    // We still have some items left to handle.
    if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
      // The current observer is valid. Hence, append the index of the current
      // item to the set of indices and head on to the next item.
      DoInitialize<CurrentIndex + 1>(
          dispatcher, check_observer, verified_observers, unverified_observers,
          std::index_sequence<IndicesToSelect..., CurrentIndex>{});
    } else {
      // The current observer is not valid. Hence, head on to the next item with
      // an unaltered list of indices.
      DoInitialize<CurrentIndex + 1>(dispatcher, check_observer,
                                     verified_observers, unverified_observers,
                                     indices);
    }
  } else if constexpr (CurrentIndex ==
                       std::tuple_size<UnverifiedObservers>::value) {
    // So we have met the end of the tuple of observers to verify.
    // Hence, we extract the additional valid observers, append to the tuple of
    // already verified observers and hand over to the dispatcher.
    auto observers = std::tuple_cat(
        verified_observers,
        std::make_tuple(std::get<IndicesToSelect>(unverified_observers)...));

    // Do a final check that neither the maximum total number of observers nor
    // the maximum number of optional observers is exceeded.
    static_assert(std::tuple_size<decltype(observers)>::value <=
                  configuration::kMaximumNumberOfObservers);
    static_assert(sizeof...(IndicesToSelect) <=
                  configuration::kMaximumNumberOfOptionalObservers);

    dispatcher.Initialize(std::move(observers));
  }
}

}  // namespace internal

// The result of concatenating two tuple-types.
template <typename... tuples>
using TupleCat = decltype(std::tuple_cat(std::declval<tuples>()...));

// Initializer collects mandatory and optional observers and initializes the
// passed Dispatcher with only the enabled observers.
//
// In some situations, presence of observers depends on runtime. i.e. command
// line parameters or CPU features. With 3 optional observers we already have 8
// different combinations. Initializer takes the job of dealing with all
// combinations from the user. It allows users to pass all observers (including
// nullptr for disabled optional observers) and initializes the Dispatcher with
// only the enabled observers.
//
// Since this process results in a combinatoric explosion, Initializer
// distinguishes between optional and mandatory observers. Mandatory observers
// are not included in the filtering process and must always be enabled (not
// nullptr).
//
// To allow the Initializer to track the number and exact type of observers, it
// is implemented as a templated class which holds information on the types in
// the std::tuples passed as template parameters. Therefore, whenever any
// observer is set, the initializer changes its type to reflect this.
template <typename MandatoryObservers = std::tuple<>,
          typename OptionalObservers = std::tuple<>>
struct BASE_EXPORT Initializer {
  Initializer() = default;
  Initializer(MandatoryObservers mandatory_observers,
              OptionalObservers optional_observers)
      : mandatory_observers_(std::move(mandatory_observers)),
        optional_observers_(std::move(optional_observers)) {}

  // Set the mandatory observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfObservers.
  template <typename... NewMandatoryObservers,
            std::enable_if_t<
                internal::LessEqual((sizeof...(NewMandatoryObservers) +
                                     std::tuple_size<OptionalObservers>::value),
                                    configuration::kMaximumNumberOfObservers),
                bool> = true>
  Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
  SetMandatoryObservers(NewMandatoryObservers*... mandatory_observers) const {
    return {std::make_tuple(mandatory_observers...), GetOptionalObservers()};
  }

  // Add mandatory observers. The number of observers that can be added is
  // limited by the current number of observers, see
  // configuration::kMaximumNumberOfObservers.
  template <typename... AdditionalMandatoryObservers,
            std::enable_if_t<internal::LessEqual(
                                 std::tuple_size<MandatoryObservers>::value +
                                     sizeof...(AdditionalMandatoryObservers) +
                                     std::tuple_size<OptionalObservers>::value,
                                 configuration::kMaximumNumberOfObservers),
                             bool> = true>
  Initializer<TupleCat<MandatoryObservers,
                       std::tuple<AdditionalMandatoryObservers*...>>,
              OptionalObservers>
  AddMandatoryObservers(
      AdditionalMandatoryObservers*... additional_mandatory_observers) const {
    return {std::tuple_cat(GetMandatoryObservers(),
                           std::make_tuple(additional_mandatory_observers...)),
            GetOptionalObservers()};
  }

  // Set the optional observers. The number of observers that can be set is
  // limited by configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... NewOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              sizeof...(NewOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((sizeof...(NewOptionalObservers) +
                                   std::tuple_size<MandatoryObservers>::value),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
  SetOptionalObservers(NewOptionalObservers*... optional_observers) const {
    return {GetMandatoryObservers(), std::make_tuple(optional_observers...)};
  }

  // Add optional observers. The number of observers that can be added is
  // limited by the current number of optional observers,
  // configuration::kMaximumNumberOfOptionalObservers as well as
  // configuration::kMaximumNumberOfObservers.
  template <
      typename... AdditionalOptionalObservers,
      std::enable_if_t<
          internal::LessEqual(
              std::tuple_size<OptionalObservers>::value +
                  sizeof...(AdditionalOptionalObservers),
              configuration::kMaximumNumberOfOptionalObservers) &&
              internal::LessEqual((std::tuple_size<OptionalObservers>::value +
                                   sizeof...(AdditionalOptionalObservers) +
                                   std::tuple_size<MandatoryObservers>::value),
                                  configuration::kMaximumNumberOfObservers),
          bool> = true>
  Initializer<
      MandatoryObservers,
      TupleCat<OptionalObservers, std::tuple<AdditionalOptionalObservers*...>>>
  AddOptionalObservers(
      AdditionalOptionalObservers*... additional_optional_observers) const {
    return {GetMandatoryObservers(),
            std::tuple_cat(GetOptionalObservers(),
                           std::make_tuple(additional_optional_observers...))};
  }

  // Perform the actual initialization on the passed dispatcher.
  // The dispatcher is passed as a template only to provide better testability.
  template <typename DispatcherType>
  void DoInitialize(DispatcherType& dispatcher) const {
    internal::DoInitialize<0>(dispatcher, internal::IsValidObserver{},
                              GetMandatoryObservers(), GetOptionalObservers(),
                              {});
  }

  const MandatoryObservers& GetMandatoryObservers() const {
    return mandatory_observers_;
  }

  const OptionalObservers& GetOptionalObservers() const {
    return optional_observers_;
  }

 private:
  MandatoryObservers mandatory_observers_;
  OptionalObservers optional_observers_;
};

// Convenience function for creating an empty Initializer.
inline Initializer<> CreateInitializer() {
  return {};
}

}  // namespace base::allocator::dispatcher

#endif  // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
|
41
src/base/allocator/dispatcher/internal/dispatch_data.cc
Normal file
41
src/base/allocator/dispatcher/internal/dispatch_data.cc
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/dispatcher/internal/dispatch_data.h"
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::internal {
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
|
||||||
|
DispatchData& DispatchData::SetAllocationObserverHooks(
|
||||||
|
AllocationObserverHook* allocation_observer_hook,
|
||||||
|
FreeObserverHook* free_observer_hook) {
|
||||||
|
allocation_observer_hook_ = allocation_observer_hook;
|
||||||
|
free_observer_hook_ = free_observer_hook;
|
||||||
|
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
DispatchData::AllocationObserverHook* DispatchData::GetAllocationObserverHook()
|
||||||
|
const {
|
||||||
|
return allocation_observer_hook_;
|
||||||
|
}
|
||||||
|
|
||||||
|
DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
|
||||||
|
return free_observer_hook_;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
DispatchData& DispatchData::SetAllocatorDispatch(
|
||||||
|
AllocatorDispatch* allocator_dispatch) {
|
||||||
|
allocator_dispatch_ = allocator_dispatch;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
AllocatorDispatch* DispatchData::GetAllocatorDispatch() const {
|
||||||
|
return allocator_dispatch_;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
} // namespace base::allocator::dispatcher::internal
|
54
src/base/allocator/dispatcher/internal/dispatch_data.h
Normal file
54
src/base/allocator/dispatcher/internal/dispatch_data.h
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::internal {
|
||||||
|
|
||||||
|
// A simple utility class to pass all the information required to properly hook
|
||||||
|
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
|
||||||
|
struct BASE_EXPORT DispatchData {
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
using AllocationObserverHook =
|
||||||
|
partition_alloc::PartitionAllocHooks::AllocationObserverHook;
|
||||||
|
using FreeObserverHook =
|
||||||
|
partition_alloc::PartitionAllocHooks::FreeObserverHook;
|
||||||
|
|
||||||
|
DispatchData& SetAllocationObserverHooks(AllocationObserverHook*,
|
||||||
|
FreeObserverHook*);
|
||||||
|
AllocationObserverHook* GetAllocationObserverHook() const;
|
||||||
|
FreeObserverHook* GetFreeObserverHook() const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
AllocationObserverHook* allocation_observer_hook_ = nullptr;
|
||||||
|
FreeObserverHook* free_observer_hook_ = nullptr;
|
||||||
|
|
||||||
|
public:
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
|
||||||
|
AllocatorDispatch* GetAllocatorDispatch() const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
AllocatorDispatch* allocator_dispatch_ = nullptr;
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::internal
|
||||||
|
|
||||||
|
#endif
|
351
src/base/allocator/dispatcher/internal/dispatcher_internal.h
Normal file
351
src/base/allocator/dispatcher/internal/dispatcher_internal.h
Normal file
@ -0,0 +1,351 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/dispatcher/configuration.h"
|
||||||
|
#include "base/allocator/dispatcher/internal/dispatch_data.h"
|
||||||
|
#include "base/allocator/dispatcher/internal/tools.h"
|
||||||
|
#include "base/allocator/dispatcher/reentry_guard.h"
|
||||||
|
#include "base/allocator/dispatcher/subsystem.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
#include "base/allocator/allocator_shim.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::internal {
|
||||||
|
|
||||||
|
template <typename CheckObserverPredicate,
|
||||||
|
typename... ObserverTypes,
|
||||||
|
size_t... Indices>
|
||||||
|
void inline PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
|
||||||
|
std::index_sequence<Indices...>,
|
||||||
|
CheckObserverPredicate check_observer) {
|
||||||
|
((DCHECK(check_observer(std::get<Indices>(observers)))), ...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename... ObserverTypes, size_t... Indices>
|
||||||
|
ALWAYS_INLINE void PerformAllocationNotification(
|
||||||
|
const std::tuple<ObserverTypes...>& observers,
|
||||||
|
std::index_sequence<Indices...>,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
AllocationSubsystem subSystem,
|
||||||
|
const char* type_name) {
|
||||||
|
((std::get<Indices>(observers)->OnAllocation(address, size, subSystem,
|
||||||
|
type_name)),
|
||||||
|
...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename... ObserverTypes, size_t... Indices>
|
||||||
|
ALWAYS_INLINE void PerformFreeNotification(
|
||||||
|
const std::tuple<ObserverTypes...>& observers,
|
||||||
|
std::index_sequence<Indices...>,
|
||||||
|
void* address) {
|
||||||
|
((std::get<Indices>(observers)->OnFree(address)), ...);
|
||||||
|
}
|
||||||
|
|
||||||
|
// DispatcherImpl provides hooks into the various memory subsystems. These hooks
|
||||||
|
// are responsible for dispatching any notification to the observers.
|
||||||
|
// In order to provide as many information on the exact type of the observer and
|
||||||
|
// prevent any conditional jumps in the hot allocation path, observers are
|
||||||
|
// stored in a std::tuple. DispatcherImpl performs a CHECK at initialization
|
||||||
|
// time to ensure they are valid.
|
||||||
|
template <typename... ObserverTypes>
|
||||||
|
struct DispatcherImpl {
|
||||||
|
using AllObservers = std::index_sequence_for<ObserverTypes...>;
|
||||||
|
|
||||||
|
template <std::enable_if_t<
|
||||||
|
internal::LessEqual(sizeof...(ObserverTypes),
|
||||||
|
configuration::kMaximumNumberOfObservers),
|
||||||
|
bool> = true>
|
||||||
|
static DispatchData GetNotificationHooks(
|
||||||
|
std::tuple<ObserverTypes*...> observers) {
|
||||||
|
s_observers = std::move(observers);
|
||||||
|
|
||||||
|
PerformObserverCheck(s_observers, AllObservers{}, IsValidObserver{});
|
||||||
|
|
||||||
|
return CreateDispatchData();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static DispatchData CreateDispatchData() {
|
||||||
|
return DispatchData()
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
.SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
|
||||||
|
&PartitionAllocatorFreeHook)
|
||||||
|
#endif
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
.SetAllocatorDispatch(&allocator_dispatch_)
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
static void PartitionAllocatorAllocationHook(void* address,
|
||||||
|
size_t size,
|
||||||
|
const char* type_name) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kPartitionAllocator,
|
||||||
|
type_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void PartitionAllocatorFreeHook(void* address) {
|
||||||
|
DoNotifyFree(address);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
static void* AllocFn(const AllocatorDispatch* self,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
void* const address = self->next->alloc_function(self->next, size, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* AllocUncheckedFn(const AllocatorDispatch* self,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
void* const address =
|
||||||
|
self->next->alloc_unchecked_function(self->next, size, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
|
||||||
|
size_t n,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
void* const address = self->next->alloc_zero_initialized_function(
|
||||||
|
self->next, n, size, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, n * size,
|
||||||
|
AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* AllocAlignedFn(const AllocatorDispatch* self,
|
||||||
|
size_t alignment,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
void* const address = self->next->alloc_aligned_function(
|
||||||
|
self->next, alignment, size, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* ReallocFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
// Note: size == 0 actually performs free.
|
||||||
|
// Note: ReentryGuard prevents from recursions introduced by malloc and
|
||||||
|
// initialization of thread local storage which happen in the allocation
|
||||||
|
// path only (please see docs of ReentryGuard for full details). Therefore,
|
||||||
|
// the DoNotifyFree doesn't need to be guarded. Instead, making it unguarded
|
||||||
|
// also ensures proper notification.
|
||||||
|
DoNotifyFree(address);
|
||||||
|
void* const reallocated_address =
|
||||||
|
self->next->realloc_function(self->next, address, size, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(reallocated_address, size,
|
||||||
|
AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return reallocated_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void FreeFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
// Note: The RecordFree should be called before free_function (here and in
|
||||||
|
// other places). That is because observers need to handle the allocation
|
||||||
|
// being freed before calling free_function, as once the latter is executed
|
||||||
|
// the address becomes available and can be allocated by another thread.
|
||||||
|
// That would be racy otherwise.
|
||||||
|
// Note: The code doesn't need to protect from recursions using
|
||||||
|
// ReentryGuard, see ReallocFn for details.
|
||||||
|
DoNotifyFree(address);
|
||||||
|
self->next->free_function(self->next, address, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t GetSizeEstimateFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
return self->next->get_size_estimate_function(self->next, address, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static unsigned BatchMallocFn(const AllocatorDispatch* self,
|
||||||
|
size_t size,
|
||||||
|
void** results,
|
||||||
|
unsigned num_requested,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
unsigned const num_allocated = self->next->batch_malloc_function(
|
||||||
|
self->next, size, results, num_requested, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
for (unsigned i = 0; i < num_allocated; ++i) {
|
||||||
|
DoNotifyAllocation(results[i], size,
|
||||||
|
AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return num_allocated;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void BatchFreeFn(const AllocatorDispatch* self,
|
||||||
|
void** to_be_freed,
|
||||||
|
unsigned num_to_be_freed,
|
||||||
|
void* context) {
|
||||||
|
// Note: The code doesn't need to protect from recursions using
|
||||||
|
// ReentryGuard, see ReallocFn for details.
|
||||||
|
for (unsigned i = 0; i < num_to_be_freed; ++i) {
|
||||||
|
DoNotifyFree(to_be_freed[i]);
|
||||||
|
}
|
||||||
|
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
|
||||||
|
context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
void* context) {
|
||||||
|
// Note: The code doesn't need to protect from recursions using
|
||||||
|
// ReentryGuard, see ReallocFn for details.
|
||||||
|
DoNotifyFree(address);
|
||||||
|
self->next->free_definite_size_function(self->next, address, size, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* AlignedMallocFn(const AllocatorDispatch* self,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
void* const address = self->next->aligned_malloc_function(
|
||||||
|
self->next, size, alignment, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void* AlignedReallocFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
size_t alignment,
|
||||||
|
void* context) {
|
||||||
|
ReentryGuard guard;
|
||||||
|
// Note: size == 0 actually performs free.
|
||||||
|
// Note: DoNotifyFree doesn't need to protect from recursions using
|
||||||
|
// ReentryGuard, see ReallocFn for details.
|
||||||
|
// Instead, making it unguarded also ensures proper notification of the free
|
||||||
|
// portion.
|
||||||
|
DoNotifyFree(address);
|
||||||
|
address = self->next->aligned_realloc_function(self->next, address, size,
|
||||||
|
alignment, context);
|
||||||
|
if (LIKELY(guard)) {
|
||||||
|
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
|
||||||
|
}
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void AlignedFreeFn(const AllocatorDispatch* self,
|
||||||
|
void* address,
|
||||||
|
void* context) {
|
||||||
|
// Note: The code doesn't need to protect from recursions using
|
||||||
|
// ReentryGuard, see ReallocFn for details.
|
||||||
|
DoNotifyFree(address);
|
||||||
|
self->next->aligned_free_function(self->next, address, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static AllocatorDispatch allocator_dispatch_;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static ALWAYS_INLINE void DoNotifyAllocation(
|
||||||
|
void* address,
|
||||||
|
size_t size,
|
||||||
|
AllocationSubsystem subSystem,
|
||||||
|
const char* type_name = nullptr) {
|
||||||
|
PerformAllocationNotification(s_observers, AllObservers{}, address, size,
|
||||||
|
subSystem, type_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ALWAYS_INLINE void DoNotifyFree(void* address) {
|
||||||
|
PerformFreeNotification(s_observers, AllObservers{}, address);
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::tuple<ObserverTypes*...> s_observers;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename... ObserverTypes>
|
||||||
|
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
template <typename... ObserverTypes>
|
||||||
|
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
|
||||||
|
&AllocFn,
|
||||||
|
&AllocUncheckedFn,
|
||||||
|
&AllocZeroInitializedFn,
|
||||||
|
&AllocAlignedFn,
|
||||||
|
&ReallocFn,
|
||||||
|
&FreeFn,
|
||||||
|
&GetSizeEstimateFn,
|
||||||
|
&BatchMallocFn,
|
||||||
|
&BatchFreeFn,
|
||||||
|
&FreeDefiniteSizeFn,
|
||||||
|
&AlignedMallocFn,
|
||||||
|
&AlignedReallocFn,
|
||||||
|
&AlignedFreeFn,
|
||||||
|
nullptr};
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Specialization of DispatcherImpl in case we have no observers to notify. In
|
||||||
|
// this special case we return a set of null pointers as the Dispatcher must not
|
||||||
|
// install any hooks at all.
|
||||||
|
template <>
|
||||||
|
struct DispatcherImpl<> {
|
||||||
|
static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
|
||||||
|
return DispatchData()
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC)
|
||||||
|
.SetAllocationObserverHooks(nullptr, nullptr)
|
||||||
|
#endif
|
||||||
|
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
|
||||||
|
.SetAllocatorDispatch(nullptr)
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// A little utility function that helps using DispatcherImpl by providing
|
||||||
|
// automated type deduction for templates.
|
||||||
|
template <typename... ObserverTypes>
|
||||||
|
inline DispatchData GetNotificationHooks(
|
||||||
|
std::tuple<ObserverTypes*...> observers) {
|
||||||
|
return DispatcherImpl<ObserverTypes...>::GetNotificationHooks(
|
||||||
|
std::move(observers));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
|
29
src/base/allocator/dispatcher/internal/tools.h
Normal file
29
src/base/allocator/dispatcher/internal/tools.h
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::internal {
|
||||||
|
|
||||||
|
constexpr bool LessEqual(size_t lhs, size_t rhs) {
|
||||||
|
return lhs <= rhs;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool Equal(size_t lhs, size_t rhs) {
|
||||||
|
return lhs == rhs;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct IsValidObserver {
|
||||||
|
template <typename T>
|
||||||
|
constexpr bool operator()(T const* ptr) const noexcept {
|
||||||
|
return ptr != nullptr;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_H_
|
34
src/base/allocator/dispatcher/reentry_guard.cc
Normal file
34
src/base/allocator/dispatcher/reentry_guard.cc
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/dispatcher/reentry_guard.h"
|
||||||
|
|
||||||
|
#include "base/check.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
|
||||||
|
#include <pthread.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher {
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
|
||||||
|
pthread_key_t ReentryGuard::entered_key_ = 0;
|
||||||
|
|
||||||
|
void ReentryGuard::InitTLSSlot() {
|
||||||
|
if (entered_key_ == 0) {
|
||||||
|
int error = pthread_key_create(&entered_key_, nullptr);
|
||||||
|
CHECK(!error);
|
||||||
|
}
|
||||||
|
|
||||||
|
DCHECK(entered_key_ != 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
void ReentryGuard::InitTLSSlot() {}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
} // namespace base::allocator::dispatcher
|
65
src/base/allocator/dispatcher/reentry_guard.h
Normal file
65
src/base/allocator/dispatcher/reentry_guard.h
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "base/check.h"
|
||||||
|
#include "base/compiler_specific.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
|
||||||
|
#include <pthread.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher {
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
|
||||||
|
|
||||||
|
// The macOS implementation of libmalloc sometimes calls malloc recursively,
|
||||||
|
// delegating allocations between zones. That causes our hooks being called
|
||||||
|
// twice. The scoped guard allows us to detect that.
|
||||||
|
//
|
||||||
|
// Besides that the implementations of thread_local on macOS and Android
|
||||||
|
// seem to allocate memory lazily on the first access to thread_local variables.
|
||||||
|
// Make use of pthread TLS instead of C++ thread_local there.
|
||||||
|
struct BASE_EXPORT ReentryGuard {
|
||||||
|
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
|
||||||
|
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
|
||||||
|
}
|
||||||
|
|
||||||
|
~ReentryGuard() {
|
||||||
|
if (LIKELY(allowed_))
|
||||||
|
pthread_setspecific(entered_key_, nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
explicit operator bool() const noexcept { return allowed_; }
|
||||||
|
|
||||||
|
// This function must be called in very early of the process start-up in
|
||||||
|
// order to acquire a low TLS slot number because glibc TLS implementation
|
||||||
|
// will require a malloc call to allocate storage for a higher slot number
|
||||||
|
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot.
|
||||||
|
static void InitTLSSlot();
|
||||||
|
|
||||||
|
private:
|
||||||
|
static pthread_key_t entered_key_;
|
||||||
|
const bool allowed_;
|
||||||
|
};
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
// Use [[maybe_unused]] as this lightweight stand-in for the more heavyweight
|
||||||
|
// ReentryGuard above will otherwise trigger the "unused code" warnings.
|
||||||
|
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
|
||||||
|
constexpr explicit operator bool() const noexcept { return true; }
|
||||||
|
|
||||||
|
static void InitTLSSlot();
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
|
21
src/base/allocator/dispatcher/subsystem.h
Normal file
21
src/base/allocator/dispatcher/subsystem.h
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher {
|
||||||
|
|
||||||
|
// Identifiers for the memory subsystem handling the allocation. Some observers
|
||||||
|
// require more detailed information on who is performing the allocation, i.e.
|
||||||
|
// SamplingHeapProfiler.
|
||||||
|
enum class AllocationSubsystem {
|
||||||
|
// Allocation is handled by PartitionAllocator.
|
||||||
|
kPartitionAllocator = 1,
|
||||||
|
// Allocation is handled by AllocatorShims.
|
||||||
|
kAllocatorShim = 2
|
||||||
|
};
|
||||||
|
} // namespace base::allocator::dispatcher
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
|
27
src/base/allocator/dispatcher/testing/dispatcher_test.h
Normal file
27
src/base/allocator/dispatcher/testing/dispatcher_test.h
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
|
||||||
|
|
||||||
|
#include "testing/gtest/include/gtest/gtest.h"
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::testing {
|
||||||
|
|
||||||
|
// DispatcherTest provides some common initialization which most of the
|
||||||
|
// unittests of the dispatcher require. DispatcherTest should not be used
|
||||||
|
// directly. Instead, derive your test fixture from it.
|
||||||
|
struct DispatcherTest : public ::testing::Test {
|
||||||
|
// Perform some commonly required initialization, at them moment
|
||||||
|
// - Initialize the TLS slot for the ReentryGuard
|
||||||
|
DispatcherTest();
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// Protected d'tor only to prevent direct usage of this class.
|
||||||
|
~DispatcherTest() override;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::testing
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
|
32
src/base/allocator/dispatcher/testing/observer_mock.h
Normal file
32
src/base/allocator/dispatcher/testing/observer_mock.h
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
|
||||||
|
|
||||||
|
#include "base/allocator/dispatcher/subsystem.h"
|
||||||
|
#include "testing/gmock/include/gmock/gmock.h"
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::testing {
|
||||||
|
|
||||||
|
// ObserverMock is a small mock class based on GoogleMock.
|
||||||
|
// It complies to the interface enforced by the dispatcher. The template
|
||||||
|
// parameter serves only to create distinct types of observers if required.
|
||||||
|
template <typename T = void>
|
||||||
|
struct ObserverMock {
|
||||||
|
MOCK_METHOD(void,
|
||||||
|
OnAllocation,
|
||||||
|
(void* address,
|
||||||
|
size_t size,
|
||||||
|
AllocationSubsystem sub_system,
|
||||||
|
const char* type_name),
|
||||||
|
());
|
||||||
|
MOCK_METHOD(void, OnFree, (void* address), ());
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::testing
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
|
50
src/base/allocator/dispatcher/testing/tools.h
Normal file
50
src/base/allocator/dispatcher/testing/tools.h
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
|
||||||
|
#define BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
#include <tuple>
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
namespace base::allocator::dispatcher::testing {
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
template <size_t Size, typename Type, typename... AppendedTypes>
|
||||||
|
struct DefineTupleFromSingleType {
|
||||||
|
using type = typename DefineTupleFromSingleType<Size - 1,
|
||||||
|
Type,
|
||||||
|
AppendedTypes...,
|
||||||
|
Type>::type;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Type, typename... AppendedTypes>
|
||||||
|
struct DefineTupleFromSingleType<0, Type, AppendedTypes...> {
|
||||||
|
using type = std::tuple<AppendedTypes...>;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
template <size_t Size, typename Type>
|
||||||
|
struct DefineTupleFromSingleType {
|
||||||
|
using type = typename internal::DefineTupleFromSingleType<Size, Type>::type;
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename Type, size_t Size, size_t... Indices>
|
||||||
|
typename internal::DefineTupleFromSingleType<Size, Type*>::type
|
||||||
|
CreateTupleOfPointers(std::array<Type, Size>& items,
|
||||||
|
std::index_sequence<Indices...>) {
|
||||||
|
return std::make_tuple((&items[Indices])...);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Type, size_t Size>
|
||||||
|
typename internal::DefineTupleFromSingleType<Size, Type*>::type
|
||||||
|
CreateTupleOfPointers(std::array<Type, Size>& items) {
|
||||||
|
return CreateTupleOfPointers(items, std::make_index_sequence<Size>{});
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace base::allocator::dispatcher::testing
|
||||||
|
|
||||||
|
#endif
|
256
src/base/allocator/early_zone_registration_mac.cc
Normal file
256
src/base/allocator/early_zone_registration_mac.cc
Normal file
@ -0,0 +1,256 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/early_zone_registration_mac.h"
|
||||||
|
|
||||||
|
#include <mach/mach.h>
|
||||||
|
#include <malloc/malloc.h>
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
|
||||||
|
// BASE_EXPORT tends to be defined as soon as anything from //base is included.
|
||||||
|
#if defined(BASE_EXPORT)
|
||||||
|
#error "This file cannot depend on //base"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
void EarlyMallocZoneRegistration() {}
|
||||||
|
void AllowDoublePartitionAllocZoneRegistration() {}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
// abort_report_np() records the message in a special section that both the
|
||||||
|
// system CrashReporter and Crashpad collect in crash reports. See also in
|
||||||
|
// chrome_exe_main_mac.cc.
|
||||||
|
void abort_report_np(const char* fmt, ...);
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
malloc_zone_t* GetDefaultMallocZone() {
|
||||||
|
// malloc_default_zone() does not return... the default zone, but the
|
||||||
|
// initial one. The default one is the first element of the default zone
|
||||||
|
// array.
|
||||||
|
unsigned int zone_count = 0;
|
||||||
|
vm_address_t* zones = nullptr;
|
||||||
|
kern_return_t result =
|
||||||
|
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
|
||||||
|
if (result != KERN_SUCCESS)
|
||||||
|
abort_report_np("Cannot enumerate malloc() zones");
|
||||||
|
return reinterpret_cast<malloc_zone_t*>(zones[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void EarlyMallocZoneRegistration() {
|
||||||
|
// Must have static storage duration, as raw pointers are passed to
|
||||||
|
// libsystem_malloc.
|
||||||
|
static malloc_zone_t g_delegating_zone;
|
||||||
|
static malloc_introspection_t g_delegating_zone_introspect;
|
||||||
|
static malloc_zone_t* g_default_zone;
|
||||||
|
|
||||||
|
// Make sure that the default zone is instantiated.
|
||||||
|
malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();
|
||||||
|
|
||||||
|
g_default_zone = GetDefaultMallocZone();
|
||||||
|
|
||||||
|
// The delegating zone:
|
||||||
|
// - Forwards all allocations to the existing default zone
|
||||||
|
// - Does *not* claim to own any memory, meaning that it will always be
|
||||||
|
// skipped in free() in libsystem_malloc.dylib.
|
||||||
|
//
|
||||||
|
// This is a temporary zone, until it gets replaced by PartitionAlloc, inside
|
||||||
|
// the main library. Since the main library depends on many external
|
||||||
|
// libraries, we cannot install PartitionAlloc as the default zone without
|
||||||
|
// concurrency issues.
|
||||||
|
//
|
||||||
|
// Instead, what we do is here, while the process is single-threaded:
|
||||||
|
// - Register the delegating zone as the default one.
|
||||||
|
// - Set the original (libsystem_malloc's) one as the second zone
|
||||||
|
//
|
||||||
|
// Later, when PartitionAlloc initializes, we replace the default (delegating)
|
||||||
|
// zone with ours. The end state is:
|
||||||
|
// 1. PartitionAlloc zone
|
||||||
|
// 2. libsystem_malloc zone
|
||||||
|
|
||||||
|
// Set up of the delegating zone. Note that it doesn't just forward calls to
|
||||||
|
// the default zone. This is because the system zone's malloc_zone_t pointer
|
||||||
|
// actually points to a larger struct, containing allocator metadata. So if we
|
||||||
|
// pass as the first parameter the "simple" delegating zone pointer, then we
|
||||||
|
// immediately crash inside the system zone functions. So we need to replace
|
||||||
|
// the zone pointer as well.
|
||||||
|
//
|
||||||
|
// Calls fall into 4 categories:
|
||||||
|
// - Allocation calls: forwarded to the real system zone
|
||||||
|
// - "Is this pointer yours" calls: always answer no
|
||||||
|
// - free(): Should never be called, but is in practice, see comments below.
|
||||||
|
// - Diagnostics and debugging: these are typically called for every
|
||||||
|
// zone. They are no-ops for us, as we don't want to double-count, or lock
|
||||||
|
// the data structures of the real zone twice.
|
||||||
|
|
||||||
|
// Allocation: Forward to the real zone.
|
||||||
|
g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
|
||||||
|
return g_default_zone->malloc(g_default_zone, size);
|
||||||
|
};
|
||||||
|
g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
|
||||||
|
size_t size) {
|
||||||
|
return g_default_zone->calloc(g_default_zone, num_items, size);
|
||||||
|
};
|
||||||
|
g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
|
||||||
|
return g_default_zone->valloc(g_default_zone, size);
|
||||||
|
};
|
||||||
|
g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
|
||||||
|
return g_default_zone->realloc(g_default_zone, ptr, size);
|
||||||
|
};
|
||||||
|
g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
|
||||||
|
void** results, unsigned num_requested) {
|
||||||
|
return g_default_zone->batch_malloc(g_default_zone, size, results,
|
||||||
|
num_requested);
|
||||||
|
};
|
||||||
|
g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
|
||||||
|
size_t size) {
|
||||||
|
return g_default_zone->memalign(g_default_zone, alignment, size);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Does ptr belong to this zone? Return value is != 0 if so.
|
||||||
|
g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
|
||||||
|
return 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Free functions.
|
||||||
|
// The normal path for freeing memory is:
|
||||||
|
// 1. Try all zones in order, call zone->size(ptr)
|
||||||
|
// 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
|
||||||
|
// 3. If no zone matches, crash.
|
||||||
|
//
|
||||||
|
// Since this zone always returns 0 in size() (see above), then zone->free()
|
||||||
|
// should never be called. Unfortunately, this is not the case, as some places
|
||||||
|
// in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
|
||||||
|
// crashing, forward the call. It's the caller's responsibility to use the
|
||||||
|
// same zone for free() as for the allocation (this is in the contract of
|
||||||
|
// malloc_zone_free()).
|
||||||
|
//
|
||||||
|
// However, note that the sequence of calls size() -> free() is not possible
|
||||||
|
// for this zone, as size() always returns 0.
|
||||||
|
g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
|
||||||
|
return g_default_zone->free(g_default_zone, ptr);
|
||||||
|
};
|
||||||
|
g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
|
||||||
|
size_t size) {
|
||||||
|
return g_default_zone->free_definite_size(g_default_zone, ptr, size);
|
||||||
|
};
|
||||||
|
g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
|
||||||
|
unsigned num_to_be_freed) {
|
||||||
|
return g_default_zone->batch_free(g_default_zone, to_be_freed,
|
||||||
|
num_to_be_freed);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Diagnostics and debugging.
|
||||||
|
//
|
||||||
|
// Do nothing to reduce memory footprint, the real
|
||||||
|
// zone will do it.
|
||||||
|
g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
|
||||||
|
size_t goal) -> size_t { return 0; };
|
||||||
|
|
||||||
|
// Introspection calls are not all optional, for instance locking and
|
||||||
|
// unlocking before/after fork() is not optional.
|
||||||
|
//
|
||||||
|
// Nothing to enumerate.
|
||||||
|
g_delegating_zone_introspect.enumerator =
|
||||||
|
[](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
|
||||||
|
memory_reader_t reader,
|
||||||
|
vm_range_recorder_t recorder) -> kern_return_t {
|
||||||
|
return KERN_SUCCESS;
|
||||||
|
};
|
||||||
|
// Need to provide a real implementation, it is used for e.g. array sizing.
|
||||||
|
g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
|
||||||
|
size_t size) {
|
||||||
|
return g_default_zone->introspect->good_size(g_default_zone, size);
|
||||||
|
};
|
||||||
|
// Nothing to do.
|
||||||
|
g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
|
||||||
|
boolean_t verbose) {};
|
||||||
|
g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
|
||||||
|
// Do not forward the lock / unlock calls. Since the default zone is still
|
||||||
|
// there, we should not lock here, as it would lock the zone twice (all
|
||||||
|
// zones are locked before fork().). Rather, do nothing, since this fake
|
||||||
|
// zone does not need any locking.
|
||||||
|
g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
|
||||||
|
g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
|
||||||
|
g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
|
||||||
|
// No stats.
|
||||||
|
g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
|
||||||
|
malloc_statistics_t* stats) {};
|
||||||
|
// We are not locked.
|
||||||
|
g_delegating_zone_introspect.zone_locked =
|
||||||
|
[](malloc_zone_t* zone) -> boolean_t { return false; };
|
||||||
|
// Don't support discharge checking.
|
||||||
|
g_delegating_zone_introspect.enable_discharge_checking =
|
||||||
|
[](malloc_zone_t* zone) -> boolean_t { return false; };
|
||||||
|
g_delegating_zone_introspect.disable_discharge_checking =
|
||||||
|
[](malloc_zone_t* zone) {};
|
||||||
|
g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
|
||||||
|
void* memory) {};
|
||||||
|
|
||||||
|
// Could use something lower to support fewer functions, but this is
|
||||||
|
// consistent with the real zone installed by PartitionAlloc.
|
||||||
|
g_delegating_zone.version = kZoneVersion;
|
||||||
|
g_delegating_zone.introspect = &g_delegating_zone_introspect;
|
||||||
|
// This name is used in PartitionAlloc's initialization to determine whether
|
||||||
|
// it should replace the delegating zone.
|
||||||
|
g_delegating_zone.zone_name = kDelegatingZoneName;
|
||||||
|
|
||||||
|
// Register puts the new zone at the end, unregister swaps the new zone with
|
||||||
|
// the last one.
|
||||||
|
// The zone array is, after these lines, in order:
|
||||||
|
// 1. |g_default_zone|...|g_delegating_zone|
|
||||||
|
// 2. |g_delegating_zone|...| (no more default)
|
||||||
|
// 3. |g_delegating_zone|...|g_default_zone|
|
||||||
|
malloc_zone_register(&g_delegating_zone);
|
||||||
|
malloc_zone_unregister(g_default_zone);
|
||||||
|
malloc_zone_register(g_default_zone);
|
||||||
|
|
||||||
|
// Make sure that the purgeable zone is after the default one.
|
||||||
|
// Will make g_default_zone take the purgeable zone spot
|
||||||
|
malloc_zone_unregister(purgeable_zone);
|
||||||
|
// Add back the purgeable zone as the last one.
|
||||||
|
malloc_zone_register(purgeable_zone);
|
||||||
|
|
||||||
|
// Final configuration:
|
||||||
|
// |g_delegating_zone|...|g_default_zone|purgeable_zone|
|
||||||
|
|
||||||
|
// Sanity check.
|
||||||
|
if (GetDefaultMallocZone() != &g_delegating_zone)
|
||||||
|
abort_report_np("Failed to install the delegating zone as default.");
|
||||||
|
}
|
||||||
|
|
||||||
|
void AllowDoublePartitionAllocZoneRegistration() {
|
||||||
|
unsigned int zone_count = 0;
|
||||||
|
vm_address_t* zones = nullptr;
|
||||||
|
kern_return_t result =
|
||||||
|
malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
|
||||||
|
if (result != KERN_SUCCESS)
|
||||||
|
abort_report_np("Cannot enumerate malloc() zones");
|
||||||
|
|
||||||
|
// If PartitionAlloc is one of the zones, *change* its name so that
|
||||||
|
// registration can happen multiple times. This works because zone
|
||||||
|
// registration only keeps a pointer to the struct, it does not copy the data.
|
||||||
|
for (unsigned int i = 0; i < zone_count; i++) {
|
||||||
|
malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
|
||||||
|
if (zone->zone_name &&
|
||||||
|
strcmp(zone->zone_name, kPartitionAllocZoneName) == 0) {
|
||||||
|
zone->zone_name = "RenamedPartitionAlloc";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
} // namespace partition_alloc
|
37
src/base/allocator/early_zone_registration_mac.h
Normal file
37
src/base/allocator/early_zone_registration_mac.h
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_

// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.

namespace partition_alloc {

// Name of the temporary delegating zone installed by
// EarlyMallocZoneRegistration(); PartitionAlloc's initialization looks for
// this name to know which zone to replace.
static constexpr char kDelegatingZoneName[] =
    "DelegatingDefaultZoneForPartitionAlloc";
// Name under which PartitionAlloc registers its own zone.
static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";

// Zone version. Determines which callbacks are set in the various
// malloc_zone_t structs.
constexpr int kZoneVersion = 9;

// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();

// Tricks the registration code to believe that PartitionAlloc was not already
// registered. This allows a future library load to register PartitionAlloc's
// zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we still
// end up with a split heap, and more memory usage.
//
// This is a hack for crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
|
119
src/base/allocator/malloc_zone_functions_mac.cc
Normal file
119
src/base/allocator/malloc_zone_functions_mac.cc
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/malloc_zone_functions_mac.h"
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
|
||||||
|
#include "base/synchronization/lock.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
|
||||||
|
static_assert(std::is_pod<MallocZoneFunctions>::value,
|
||||||
|
"MallocZoneFunctions must be POD");
|
||||||
|
|
||||||
|
void StoreZoneFunctions(const ChromeMallocZone* zone,
|
||||||
|
MallocZoneFunctions* functions) {
|
||||||
|
memset(functions, 0, sizeof(MallocZoneFunctions));
|
||||||
|
functions->malloc = zone->malloc;
|
||||||
|
functions->calloc = zone->calloc;
|
||||||
|
functions->valloc = zone->valloc;
|
||||||
|
functions->free = zone->free;
|
||||||
|
functions->realloc = zone->realloc;
|
||||||
|
functions->size = zone->size;
|
||||||
|
CHECK(functions->malloc && functions->calloc && functions->valloc &&
|
||||||
|
functions->free && functions->realloc && functions->size);
|
||||||
|
|
||||||
|
// These functions might be nullptr.
|
||||||
|
functions->batch_malloc = zone->batch_malloc;
|
||||||
|
functions->batch_free = zone->batch_free;
|
||||||
|
|
||||||
|
if (zone->version >= 5) {
|
||||||
|
// Not all custom malloc zones have a memalign.
|
||||||
|
functions->memalign = zone->memalign;
|
||||||
|
}
|
||||||
|
if (zone->version >= 6) {
|
||||||
|
// This may be nullptr.
|
||||||
|
functions->free_definite_size = zone->free_definite_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that zone version 8 introduced a pressure relief callback, and version
|
||||||
|
// 10 introduced a claimed address callback, but neither are allocation or
|
||||||
|
// deallocation callbacks and so aren't important to intercept.
|
||||||
|
|
||||||
|
functions->context = zone;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// All modifications to g_malloc_zones are gated behind this lock.
|
||||||
|
// Dispatch to a malloc zone does not need to acquire this lock.
|
||||||
|
base::Lock& GetLock() {
|
||||||
|
static base::Lock* g_lock = new base::Lock;
|
||||||
|
return *g_lock;
|
||||||
|
}
|
||||||
|
|
||||||
|
void EnsureMallocZonesInitializedLocked() {
|
||||||
|
GetLock().AssertAcquired();
|
||||||
|
}
|
||||||
|
|
||||||
|
int g_zone_count = 0;
|
||||||
|
|
||||||
|
bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
|
||||||
|
EnsureMallocZonesInitializedLocked();
|
||||||
|
GetLock().AssertAcquired();
|
||||||
|
for (int i = 0; i < g_zone_count; ++i) {
|
||||||
|
if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Records |zone|'s dispatch table into g_malloc_zones. Returns false when
// the zone is already stored or the table is full; true on success.
bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock lock(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;
  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code
  // that reads these values is triggered after this function returns, so we
  // want to guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}
|
||||||
|
|
||||||
|
// Thread-safe wrapper around IsMallocZoneAlreadyStoredLocked().
bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock lock(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}
|
||||||
|
|
||||||
|
// A zone needs replacing when it has been stored (so shimming is expected)
// but its malloc entry point no longer matches the recorded one.
bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}
|
||||||
|
|
||||||
|
int GetMallocZoneCountForTesting() {
|
||||||
|
base::AutoLock l(GetLock());
|
||||||
|
return g_zone_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test-only: wipes every stored dispatch table and resets the count.
void ClearAllMallocZonesForTesting() {
  base::AutoLock lock(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
103
src/base/allocator/malloc_zone_functions_mac.h
Normal file
103
src/base/allocator/malloc_zone_functions_mac.h
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
|
||||||
|
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
|
||||||
|
|
||||||
|
#include <malloc/malloc.h>
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "base/immediate_crash.h"
|
||||||
|
#include "third_party/apple_apsl/malloc.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
// Signatures of the malloc zone callbacks that the shim intercepts. Alias
// declarations (`using`) instead of `typedef` per modern C++ style; the alias
// names and underlying types are unchanged.
using malloc_type = void* (*)(struct _malloc_zone_t* zone, size_t size);
using calloc_type = void* (*)(struct _malloc_zone_t* zone,
                              size_t num_items,
                              size_t size);
using valloc_type = void* (*)(struct _malloc_zone_t* zone, size_t size);
using free_type = void (*)(struct _malloc_zone_t* zone, void* ptr);
using realloc_type = void* (*)(struct _malloc_zone_t* zone,
                               void* ptr,
                               size_t size);
using memalign_type = void* (*)(struct _malloc_zone_t* zone,
                                size_t alignment,
                                size_t size);
using batch_malloc_type = unsigned (*)(struct _malloc_zone_t* zone,
                                       size_t size,
                                       void** results,
                                       unsigned num_requested);
using batch_free_type = void (*)(struct _malloc_zone_t* zone,
                                 void** to_be_freed,
                                 unsigned num_to_be_freed);
using free_definite_size_type = void (*)(struct _malloc_zone_t* zone,
                                         void* ptr,
                                         size_t size);
using size_fn_type = size_t (*)(struct _malloc_zone_t* zone, const void* ptr);
|
||||||
|
|
||||||
|
struct MallocZoneFunctions {
|
||||||
|
malloc_type malloc;
|
||||||
|
calloc_type calloc;
|
||||||
|
valloc_type valloc;
|
||||||
|
free_type free;
|
||||||
|
realloc_type realloc;
|
||||||
|
memalign_type memalign;
|
||||||
|
batch_malloc_type batch_malloc;
|
||||||
|
batch_free_type batch_free;
|
||||||
|
free_definite_size_type free_definite_size;
|
||||||
|
size_fn_type size;
|
||||||
|
const ChromeMallocZone* context;
|
||||||
|
};
|
||||||
|
|
||||||
|
BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
                                    MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];

// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch
// back into the zone, and additional zones may be added later in the
// execution of the program, so the array needs to be both thread-safe and
// high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
// 1. Fill in all the fields.
// 2. Update the total zone count.
// 3. Insert a memory barrier.
// 4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
    ChromeMallocZone* zone,
    const MallocZoneFunctions* functions);

BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();
|
||||||
|
|
||||||
|
// Returns the stored dispatch table whose |context| matches |zone|.
// Crashes when |zone| was never stored — dispatching to an unknown zone has
// no sane fallback.
inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
  // kMaxZoneCount is a (signed) int; using the same type for the index
  // avoids a signed/unsigned comparison in the loop condition.
  for (int i = 0; i < kMaxZoneCount; ++i) {
    if (g_malloc_zones[i].context == zone)
      return g_malloc_zones[i];
  }
  IMMEDIATE_CRASH();
}
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
|
148
src/base/allocator/partition_alloc_features.cc
Normal file
148
src/base/allocator/partition_alloc_features.cc
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_alloc_features.h"
|
||||||
|
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "base/feature_list.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace features {
|
||||||
|
|
||||||
|
const BASE_EXPORT Feature kPartitionAllocDanglingPtr{
|
||||||
|
"PartitionAllocDanglingPtr", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
|
||||||
|
{DanglingPtrMode::kCrash, "crash"},
|
||||||
|
{DanglingPtrMode::kLogSignature, "log_signature"},
|
||||||
|
};
|
||||||
|
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
|
||||||
|
&kPartitionAllocDanglingPtr,
|
||||||
|
"mode",
|
||||||
|
DanglingPtrMode::kCrash,
|
||||||
|
&kDanglingPtrModeOption,
|
||||||
|
};
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
// If enabled, PCScan is turned on by default for all partitions that don't
|
||||||
|
// disable it explicitly.
|
||||||
|
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
|
||||||
|
FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
#endif // defined(PA_ALLOW_PCSCAN)
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// If enabled, PCScan is turned on only for the browser's malloc partition.
|
||||||
|
const Feature kPartitionAllocPCScanBrowserOnly{
|
||||||
|
"PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// If enabled, PCScan is turned on only for the renderer's malloc partition.
|
||||||
|
const Feature kPartitionAllocPCScanRendererOnly{
|
||||||
|
"PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// If enabled, this instance belongs to the Control group of the BackupRefPtr
|
||||||
|
// binary experiment.
|
||||||
|
const Feature kPartitionAllocBackupRefPtrControl{
|
||||||
|
"PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// Use a larger maximum thread cache cacheable bucket size.
|
||||||
|
const Feature kPartitionAllocLargeThreadCacheSize{
|
||||||
|
"PartitionAllocLargeThreadCacheSize",
|
||||||
|
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
|
||||||
|
// Not unconditionally enabled on 32 bit Android, since it is a more
|
||||||
|
// memory-constrained platform.
|
||||||
|
FEATURE_DISABLED_BY_DEFAULT
|
||||||
|
#else
|
||||||
|
FEATURE_ENABLED_BY_DEFAULT
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
|
||||||
|
"PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
const Feature kPartitionAllocBackupRefPtr {
|
||||||
|
"PartitionAllocBackupRefPtr",
|
||||||
|
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
|
||||||
|
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
|
||||||
|
FEATURE_ENABLED_BY_DEFAULT
|
||||||
|
#else
|
||||||
|
FEATURE_DISABLED_BY_DEFAULT
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
|
||||||
|
kBackupRefPtrEnabledProcessesOptions[] = {
|
||||||
|
{BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
|
||||||
|
{BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
|
||||||
|
"browser-and-renderer"},
|
||||||
|
{BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
|
||||||
|
{BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};
|
||||||
|
|
||||||
|
const base::FeatureParam<BackupRefPtrEnabledProcesses>
|
||||||
|
kBackupRefPtrEnabledProcessesParam{
|
||||||
|
&kPartitionAllocBackupRefPtr, "enabled-processes",
|
||||||
|
BackupRefPtrEnabledProcesses::kBrowserOnly,
|
||||||
|
&kBackupRefPtrEnabledProcessesOptions};
|
||||||
|
|
||||||
|
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
|
||||||
|
{BackupRefPtrMode::kDisabled, "disabled"},
|
||||||
|
{BackupRefPtrMode::kEnabled, "enabled"},
|
||||||
|
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
|
||||||
|
{BackupRefPtrMode::kDisabledButSplitPartitions2Way,
|
||||||
|
"disabled-but-2-way-split"},
|
||||||
|
{BackupRefPtrMode::kDisabledButSplitPartitions3Way,
|
||||||
|
"disabled-but-3-way-split"},
|
||||||
|
};
|
||||||
|
|
||||||
|
const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
|
||||||
|
&kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
|
||||||
|
&kBackupRefPtrModeOptions};
|
||||||
|
|
||||||
|
const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
|
||||||
|
&kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
|
||||||
|
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
|
||||||
|
&kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
|
||||||
|
false}; // Not much noise at the moment to enable by default.
|
||||||
|
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
|
||||||
|
&kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};
|
||||||
|
|
||||||
|
// If enabled, switches the bucket distribution to an alternate one. The
|
||||||
|
// alternate distribution must have buckets that are a subset of the default
|
||||||
|
// one.
|
||||||
|
const Feature kPartitionAllocUseAlternateDistribution{
|
||||||
|
"PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
|
||||||
|
// affect whether PCScan is enabled itself.
|
||||||
|
const Feature kPartitionAllocPCScanMUAwareScheduler{
|
||||||
|
"PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// If enabled, PCScan frees unconditionally all quarantined objects.
|
||||||
|
// This is a performance testing feature.
|
||||||
|
const Feature kPartitionAllocPCScanImmediateFreeing{
|
||||||
|
"PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// If enabled, PCScan clears eagerly (synchronously) on free().
|
||||||
|
const Feature kPartitionAllocPCScanEagerClearing{
|
||||||
|
"PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// In addition to heap, scan also the stack of the current mutator.
|
||||||
|
const Feature kPartitionAllocPCScanStackScanning {
|
||||||
|
"PartitionAllocPCScanStackScanning",
|
||||||
|
#if defined(PA_PCSCAN_STACK_SUPPORTED)
|
||||||
|
FEATURE_ENABLED_BY_DEFAULT
|
||||||
|
#else
|
||||||
|
FEATURE_DISABLED_BY_DEFAULT
|
||||||
|
#endif // defined(PA_PCSCAN_STACK_SUPPORTED)
|
||||||
|
};
|
||||||
|
|
||||||
|
const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
|
||||||
|
FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
// Whether to sort the active slot spans in PurgeMemory().
|
||||||
|
extern const Feature kPartitionAllocSortActiveSlotSpans{
|
||||||
|
"PartitionAllocSortActiveSlotSpans", FEATURE_DISABLED_BY_DEFAULT};
|
||||||
|
|
||||||
|
} // namespace features
|
||||||
|
} // namespace base
|
107
src/base/allocator/partition_alloc_features.h
Normal file
107
src/base/allocator/partition_alloc_features.h
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

namespace base {
namespace features {

// See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtr;
enum class DanglingPtrMode {
  // Crash immediately after detecting a dangling raw_ptr.
  kCrash,  // (default)

  // Log the signature of every occurrence without crashing; used by bots.
  // Format "[DanglingSignature]\t<1>\t<2>"
  // 1. The function who freed the memory while it was still referenced.
  // 2. The function who released the raw_ptr reference.
  kLogSignature,

  // Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
    kDanglingPtrModeParam;

#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif  // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocPCScanRendererOnly;
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
extern const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// Which process types have BackupRefPtr protection enabled.
enum class BackupRefPtrEnabledProcesses {
  // BRP enabled only in the browser process.
  kBrowserOnly,
  // BRP enabled only in the browser and renderer processes.
  kBrowserAndRenderer,
  // BRP enabled in all processes, except renderer.
  kNonRenderer,
  // BRP enabled in all processes.
  kAllProcesses,
};
|
||||||
|
|
||||||
|
enum class BackupRefPtrMode {
|
||||||
|
// BRP is disabled across all partitions. Equivalent to the Finch flag being
|
||||||
|
// disabled.
|
||||||
|
kDisabled,
|
||||||
|
|
||||||
|
// BRP is enabled in the main partition, as well as certain Renderer-only
|
||||||
|
// partitions (if enabled in Renderer at all).
|
||||||
|
// This entails splitting the main partition.
|
||||||
|
kEnabled,
|
||||||
|
|
||||||
|
// Same as kEnabled but without zapping quarantined objects.
|
||||||
|
kEnabledWithoutZapping,
|
||||||
|
|
||||||
|
// BRP is disabled, but the main partition is split out, as if BRP was enabled
|
||||||
|
// in the "previous slot" mode.
|
||||||
|
kDisabledButSplitPartitions2Way,
|
||||||
|
|
||||||
|
// BRP is disabled, but the main partition *and* aligned partition are split
|
||||||
|
// out, as if BRP was enabled in the "before allocation" mode.
|
||||||
|
kDisabledButSplitPartitions3Way,
|
||||||
|
};
|
||||||
|
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtr;
|
||||||
|
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
|
||||||
|
kBackupRefPtrEnabledProcessesParam;
|
||||||
|
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
|
||||||
|
kBackupRefPtrModeParam;
|
||||||
|
extern const BASE_EXPORT base::FeatureParam<bool>
|
||||||
|
kBackupRefPtrAsanEnableDereferenceCheckParam;
|
||||||
|
extern const BASE_EXPORT base::FeatureParam<bool>
|
||||||
|
kBackupRefPtrAsanEnableExtractionCheckParam;
|
||||||
|
extern const BASE_EXPORT base::FeatureParam<bool>
|
||||||
|
kBackupRefPtrAsanEnableInstantiationCheckParam;
|
||||||
|
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocDCScan;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution;
|
||||||
|
extern const BASE_EXPORT Feature kPartitionAllocSortActiveSlotSpans;
|
||||||
|
|
||||||
|
} // namespace features
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
|
543
src/base/allocator/partition_alloc_support.cc
Normal file
543
src/base/allocator/partition_alloc_support.cc
Normal file
@ -0,0 +1,543 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_alloc_support.h"
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <map>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "base/allocator/buildflags.h"
|
||||||
|
#include "base/allocator/partition_alloc_features.h"
|
||||||
|
#include "base/allocator/partition_allocator/allocation_guard.h"
|
||||||
|
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
|
||||||
|
#include "base/allocator/partition_allocator/memory_reclaimer.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
#include "base/allocator/partition_allocator/starscan/pcscan.h"
|
||||||
|
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
|
||||||
|
#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
|
||||||
|
#include "base/allocator/partition_allocator/thread_cache.h"
|
||||||
|
#include "base/bind.h"
|
||||||
|
#include "base/callback.h"
|
||||||
|
#include "base/check.h"
|
||||||
|
#include "base/debug/stack_trace.h"
|
||||||
|
#include "base/feature_list.h"
|
||||||
|
#include "base/immediate_crash.h"
|
||||||
|
#include "base/metrics/histogram_functions.h"
|
||||||
|
#include "base/metrics/histogram_macros.h"
|
||||||
|
#include "base/no_destructor.h"
|
||||||
|
#include "base/strings/string_piece.h"
|
||||||
|
#include "base/strings/string_split.h"
|
||||||
|
#include "base/strings/stringprintf.h"
|
||||||
|
#include "base/thread_annotations.h"
|
||||||
|
#include "base/threading/platform_thread.h"
|
||||||
|
#include "base/threading/thread_task_runner_handle.h"
|
||||||
|
#include "base/time/time.h"
|
||||||
|
#include "base/timer/timer.h"
|
||||||
|
#include "base/trace_event/base_tracing.h"
|
||||||
|
#include "third_party/abseil-cpp/absl/types/optional.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
|
||||||
|
#if BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
constexpr const char* ScannerIdToTracingString(
|
||||||
|
partition_alloc::internal::StatsCollector::ScannerId id) {
|
||||||
|
switch (id) {
|
||||||
|
case partition_alloc::internal::StatsCollector::ScannerId::kClear:
|
||||||
|
return "PCScan.Scanner.Clear";
|
||||||
|
case partition_alloc::internal::StatsCollector::ScannerId::kScan:
|
||||||
|
return "PCScan.Scanner.Scan";
|
||||||
|
case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
|
||||||
|
return "PCScan.Scanner.Sweep";
|
||||||
|
case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
|
||||||
|
return "PCScan.Scanner";
|
||||||
|
case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
|
||||||
|
__builtin_unreachable();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr const char* MutatorIdToTracingString(
|
||||||
|
partition_alloc::internal::StatsCollector::MutatorId id) {
|
||||||
|
switch (id) {
|
||||||
|
case partition_alloc::internal::StatsCollector::MutatorId::kClear:
|
||||||
|
return "PCScan.Mutator.Clear";
|
||||||
|
case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
|
||||||
|
return "PCScan.Mutator.ScanStack";
|
||||||
|
case partition_alloc::internal::StatsCollector::MutatorId::kScan:
|
||||||
|
return "PCScan.Mutator.Scan";
|
||||||
|
case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
|
||||||
|
return "PCScan.Mutator";
|
||||||
|
case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
|
||||||
|
__builtin_unreachable();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
|
||||||
|
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
|
||||||
|
class StatsReporterImpl final : public partition_alloc::StatsReporter {
|
||||||
|
public:
|
||||||
|
void ReportTraceEvent(
|
||||||
|
partition_alloc::internal::StatsCollector::ScannerId id,
|
||||||
|
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
|
||||||
|
int64_t start_time_ticks_internal_value,
|
||||||
|
int64_t end_time_ticks_internal_value) override {
|
||||||
|
#if BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
// TRACE_EVENT_* macros below drop most parameters when tracing is
|
||||||
|
// disabled at compile time.
|
||||||
|
const char* tracing_id = ScannerIdToTracingString(id);
|
||||||
|
const TimeTicks start_time =
|
||||||
|
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
|
||||||
|
const TimeTicks end_time =
|
||||||
|
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
|
||||||
|
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
|
||||||
|
perfetto::ThreadTrack::ForThread(tid), start_time);
|
||||||
|
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
|
||||||
|
end_time);
|
||||||
|
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
}
|
||||||
|
|
||||||
|
void ReportTraceEvent(
|
||||||
|
partition_alloc::internal::StatsCollector::MutatorId id,
|
||||||
|
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
|
||||||
|
int64_t start_time_ticks_internal_value,
|
||||||
|
int64_t end_time_ticks_internal_value) override {
|
||||||
|
#if BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
// TRACE_EVENT_* macros below drop most parameters when tracing is
|
||||||
|
// disabled at compile time.
|
||||||
|
const char* tracing_id = MutatorIdToTracingString(id);
|
||||||
|
const TimeTicks start_time =
|
||||||
|
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
|
||||||
|
const TimeTicks end_time =
|
||||||
|
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
|
||||||
|
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
|
||||||
|
perfetto::ThreadTrack::ForThread(tid), start_time);
|
||||||
|
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
|
||||||
|
end_time);
|
||||||
|
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
|
||||||
|
}
|
||||||
|
|
||||||
|
void ReportSurvivedQuarantineSize(size_t survived_size) override {
|
||||||
|
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
|
||||||
|
survived_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ReportSurvivedQuarantinePercent(double survived_rate) override {
|
||||||
|
// Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
|
||||||
|
// divide back.
|
||||||
|
// TODO(bikineev): Remove after switching to perfetto.
|
||||||
|
TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
|
||||||
|
1000 * survived_rate);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
|
||||||
|
TimeDelta sample = Microseconds(sample_in_usec);
|
||||||
|
UmaHistogramTimes(stats_name, sample);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static constexpr char kTraceCategory[] = "partition_alloc";
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // defined(PA_ALLOW_PCSCAN)
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
void RegisterPCScanStatsReporter() {
|
||||||
|
static StatsReporterImpl s_reporter;
|
||||||
|
static bool registered = false;
|
||||||
|
|
||||||
|
DCHECK(!registered);
|
||||||
|
|
||||||
|
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
|
||||||
|
registered = true;
|
||||||
|
}
|
||||||
|
#endif // defined(PA_ALLOW_PCSCAN)
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
void RunThreadCachePeriodicPurge() {
|
||||||
|
// Micros, since periodic purge should typically take at most a few ms.
|
||||||
|
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
|
||||||
|
TRACE_EVENT0("memory", "PeriodicPurge");
|
||||||
|
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
|
||||||
|
instance.RunPeriodicPurge();
|
||||||
|
TimeDelta delay =
|
||||||
|
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
|
||||||
|
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
|
||||||
|
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
|
||||||
|
TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
|
||||||
|
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
|
||||||
|
|
||||||
|
{
|
||||||
|
// Micros, since memory reclaiming should typically take at most a few ms.
|
||||||
|
SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
|
||||||
|
instance->ReclaimNormal();
|
||||||
|
}
|
||||||
|
|
||||||
|
TimeDelta delay =
|
||||||
|
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
|
||||||
|
task_runner->PostDelayedTask(
|
||||||
|
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void StartThreadCachePeriodicPurge() {
|
||||||
|
auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
|
||||||
|
TimeDelta delay =
|
||||||
|
Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
|
||||||
|
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
|
||||||
|
FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
|
||||||
|
// Can be called several times.
|
||||||
|
static bool is_memory_reclaimer_running = false;
|
||||||
|
if (is_memory_reclaimer_running)
|
||||||
|
return;
|
||||||
|
is_memory_reclaimer_running = true;
|
||||||
|
|
||||||
|
// The caller of the API fully controls where running the reclaim.
|
||||||
|
// However there are a few reasons to recommend that the caller runs
|
||||||
|
// it on the main thread:
|
||||||
|
// - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
|
||||||
|
// is more likely in cache when executing on the main thread.
|
||||||
|
// - Memory reclaim takes the partition lock for each partition. As a
|
||||||
|
// consequence, while reclaim is running, the main thread is unlikely to be
|
||||||
|
// able to make progress, as it would be waiting on the lock.
|
||||||
|
// - Finally, this runs in idle time only, so there should be no visible
|
||||||
|
// impact.
|
||||||
|
//
|
||||||
|
// From local testing, time to reclaim is 100us-1ms, and reclaiming every few
|
||||||
|
// seconds is useful. Since this is meant to run during idle time only, it is
|
||||||
|
// a reasonable starting point balancing effectivenes vs cost. See
|
||||||
|
// crbug.com/942512 for details and experimental results.
|
||||||
|
auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
|
||||||
|
TimeDelta delay =
|
||||||
|
Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
|
||||||
|
task_runner->PostDelayedTask(
|
||||||
|
FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
|
||||||
|
std::map<std::string, std::string> trials;
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// BackupRefPtr_Effective and PCScan_Effective record whether or not
|
||||||
|
// BackupRefPtr and/or PCScan are enabled. The experiments aren't independent,
|
||||||
|
// so having a synthetic Finch will help look only at cases where one isn't
|
||||||
|
// affected by the other.
|
||||||
|
|
||||||
|
// Whether PartitionAllocBackupRefPtr is enabled (as determined by
|
||||||
|
// FeatureList::IsEnabled).
|
||||||
|
[[maybe_unused]] bool brp_finch_enabled = false;
|
||||||
|
// Whether PartitionAllocBackupRefPtr is set up for the default behavior. The
|
||||||
|
// default behavior is when either the Finch flag is disabled, or is enabled
|
||||||
|
// in brp-mode=disabled (these two options are equivalent).
|
||||||
|
[[maybe_unused]] bool brp_nondefault_behavior = false;
|
||||||
|
// Whether PartitionAllocBackupRefPtr is set up to enable BRP protection. It
|
||||||
|
// requires the Finch flag to be enabled and brp-mode!=disabled*. Some modes,
|
||||||
|
// e.g. disabled-but-3-way-split, do something (hence can't be considered the
|
||||||
|
// default behavior), but don't enable BRP protection.
|
||||||
|
[[maybe_unused]] bool brp_truly_enabled = false;
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
if (FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr))
|
||||||
|
brp_finch_enabled = true;
|
||||||
|
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() !=
|
||||||
|
features::BackupRefPtrMode::kDisabled)
|
||||||
|
brp_nondefault_behavior = true;
|
||||||
|
if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() ==
|
||||||
|
features::BackupRefPtrMode::kEnabled)
|
||||||
|
brp_truly_enabled = true;
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
[[maybe_unused]] bool pcscan_enabled =
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
|
||||||
|
#else
|
||||||
|
false;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
std::string brp_group_name = "Unavailable";
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
if (pcscan_enabled) {
|
||||||
|
// If PCScan is enabled, just ignore the population.
|
||||||
|
brp_group_name = "Ignore_PCScanIsOn";
|
||||||
|
} else if (!brp_finch_enabled) {
|
||||||
|
// The control group is actually disguised as "enabled", but in fact it's
|
||||||
|
// disabled using a param. This is to differentiate the population that
|
||||||
|
// participates in the control group, from the population that isn't in any
|
||||||
|
// group.
|
||||||
|
brp_group_name = "Ignore_NoGroup";
|
||||||
|
} else {
|
||||||
|
switch (features::kBackupRefPtrModeParam.Get()) {
|
||||||
|
case features::BackupRefPtrMode::kDisabled:
|
||||||
|
brp_group_name = "Disabled";
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrMode::kEnabled:
|
||||||
|
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||||
|
brp_group_name = "EnabledPrevSlot";
|
||||||
|
#else
|
||||||
|
brp_group_name = "EnabledBeforeAlloc";
|
||||||
|
#endif
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrMode::kEnabledWithoutZapping:
|
||||||
|
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||||
|
brp_group_name = "EnabledPrevSlotWithoutZapping";
|
||||||
|
#else
|
||||||
|
brp_group_name = "EnabledBeforeAllocWithoutZapping";
|
||||||
|
#endif
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
|
||||||
|
brp_group_name = "DisabledBut2WaySplit";
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
|
||||||
|
brp_group_name = "DisabledBut3WaySplit";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (features::kBackupRefPtrModeParam.Get() !=
|
||||||
|
features::BackupRefPtrMode::kDisabled) {
|
||||||
|
std::string process_selector;
|
||||||
|
switch (features::kBackupRefPtrEnabledProcessesParam.Get()) {
|
||||||
|
case features::BackupRefPtrEnabledProcesses::kBrowserOnly:
|
||||||
|
process_selector = "BrowserOnly";
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
|
||||||
|
process_selector = "BrowserAndRenderer";
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrEnabledProcesses::kNonRenderer:
|
||||||
|
process_selector = "NonRenderer";
|
||||||
|
break;
|
||||||
|
case features::BackupRefPtrEnabledProcesses::kAllProcesses:
|
||||||
|
process_selector = "AllProcesses";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
brp_group_name += ("_" + process_selector);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
trials.emplace("BackupRefPtr_Effective", brp_group_name);
|
||||||
|
|
||||||
|
// On 32-bit architectures, PCScan is not supported and permanently disabled.
|
||||||
|
// Don't lump it into "Disabled", so that belonging to "Enabled"/"Disabled" is
|
||||||
|
// fully controlled by Finch and thus have identical population sizes.
|
||||||
|
std::string pcscan_group_name = "Unavailable";
|
||||||
|
std::string pcscan_group_name_fallback = "Unavailable";
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
if (brp_truly_enabled) {
|
||||||
|
// If BRP protection is enabled, just ignore the population. Check
|
||||||
|
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes
|
||||||
|
// where BRP protection is actually disabled.
|
||||||
|
pcscan_group_name = "Ignore_BRPIsOn";
|
||||||
|
} else {
|
||||||
|
pcscan_group_name = (pcscan_enabled ? "Enabled" : "Disabled");
|
||||||
|
}
|
||||||
|
// In case we are incorrect that PCScan is independent of partition-split
|
||||||
|
// modes, create a fallback trial that only takes into account the BRP Finch
|
||||||
|
// settings that preserve the default behavior.
|
||||||
|
if (brp_nondefault_behavior) {
|
||||||
|
pcscan_group_name_fallback = "Ignore_BRPIsOn";
|
||||||
|
} else {
|
||||||
|
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
|
||||||
|
}
|
||||||
|
#endif // defined(PA_ALLOW_PCSCAN)
|
||||||
|
trials.emplace("PCScan_Effective", pcscan_group_name);
|
||||||
|
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
return trials;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
internal::PartitionLock g_stack_trace_buffer_lock;
|
||||||
|
|
||||||
|
struct StackTraceWithID {
|
||||||
|
debug::StackTrace stack_trace;
|
||||||
|
uintptr_t id = 0;
|
||||||
|
};
|
||||||
|
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
|
||||||
|
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
|
||||||
|
|
||||||
|
void DanglingRawPtrDetected(uintptr_t id) {
|
||||||
|
// This is called from inside the allocator. No allocation is allowed.
|
||||||
|
|
||||||
|
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
|
||||||
|
|
||||||
|
#if DCHECK_IS_ON()
|
||||||
|
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer)
|
||||||
|
PA_DCHECK(!entry || entry->id != id);
|
||||||
|
#endif // DCHECK_IS_ON()
|
||||||
|
|
||||||
|
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
|
||||||
|
if (!entry) {
|
||||||
|
entry = {debug::StackTrace(), id};
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The StackTrace hasn't been recorded, because the buffer isn't large
|
||||||
|
// enough.
|
||||||
|
}
|
||||||
|
|
||||||
|
// From the StackTrace recorded in |DanglingRawPtrDetected|, extract the one
|
||||||
|
// whose id match |id|. Return nullopt if not found.
|
||||||
|
absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
|
||||||
|
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
|
||||||
|
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
|
||||||
|
if (entry && entry->id == id) {
|
||||||
|
debug::StackTrace stack_trace = std::move(entry->stack_trace);
|
||||||
|
entry = absl::nullopt;
|
||||||
|
return stack_trace;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return absl::nullopt;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract from the StackTrace output, the signature of the pertinent caller.
|
||||||
|
// This function is meant to be used only by Chromium developers, to list what
|
||||||
|
// are all the dangling raw_ptr occurrences in a table.
|
||||||
|
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
|
||||||
|
std::vector<StringPiece> lines = SplitStringPiece(
|
||||||
|
stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
|
||||||
|
|
||||||
|
// We are looking for the callers of the function releasing the raw_ptr and
|
||||||
|
// freeing memory:
|
||||||
|
const StringPiece callees[] = {
|
||||||
|
"internal::BackupRefPtrImpl<>::ReleaseInternal()",
|
||||||
|
"internal::PartitionFree()",
|
||||||
|
"base::(anonymous namespace)::FreeFn()",
|
||||||
|
};
|
||||||
|
size_t caller_index = 0;
|
||||||
|
for (size_t i = 0; i < lines.size(); ++i) {
|
||||||
|
for (const auto& callee : callees) {
|
||||||
|
if (lines[i].find(callee) != StringPiece::npos) {
|
||||||
|
caller_index = i + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (caller_index >= lines.size()) {
|
||||||
|
return "undefined";
|
||||||
|
}
|
||||||
|
StringPiece caller = lines[caller_index];
|
||||||
|
|
||||||
|
// |callers| follows the following format:
|
||||||
|
//
|
||||||
|
// #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()
|
||||||
|
// -- -------------- -----------------------------------------------
|
||||||
|
// Depth Address Function
|
||||||
|
|
||||||
|
size_t address_start = caller.find(' ');
|
||||||
|
size_t function_start = caller.find(' ', address_start + 1);
|
||||||
|
|
||||||
|
if (address_start == caller.npos || function_start == caller.npos) {
|
||||||
|
return "undefined";
|
||||||
|
}
|
||||||
|
|
||||||
|
return std::string(caller.substr(function_start + 1));
|
||||||
|
}
|
||||||
|
|
||||||
|
void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
|
||||||
|
// This is called from raw_ptr<>'s release operation. Making allocations is
|
||||||
|
// allowed. In particular, symbolizing and printing the StackTraces may
|
||||||
|
// allocate memory.
|
||||||
|
|
||||||
|
debug::StackTrace stack_trace_release;
|
||||||
|
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
|
||||||
|
|
||||||
|
if (stack_trace_free) {
|
||||||
|
LOG(ERROR) << StringPrintf(
|
||||||
|
"[DanglingSignature]\t%s\t%s",
|
||||||
|
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
|
||||||
|
ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
|
||||||
|
} else {
|
||||||
|
LOG(ERROR) << StringPrintf(
|
||||||
|
"[DanglingSignature]\t%s\tmissing-stacktrace",
|
||||||
|
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void DanglingRawPtrReleasedCrash(uintptr_t id) {
|
||||||
|
// This is called from raw_ptr<>'s release operation. Making allocations is
|
||||||
|
// allowed. In particular, symbolizing and printing the StackTraces may
|
||||||
|
// allocate memory.
|
||||||
|
debug::StackTrace stack_trace_release;
|
||||||
|
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
|
||||||
|
|
||||||
|
if (stack_trace_free) {
|
||||||
|
LOG(ERROR) << StringPrintf(
|
||||||
|
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
|
||||||
|
":\n\n"
|
||||||
|
"The memory was freed at:\n%s\n"
|
||||||
|
"The dangling raw_ptr was released at:\n%s",
|
||||||
|
id, stack_trace_free->ToString().c_str(),
|
||||||
|
stack_trace_release.ToString().c_str());
|
||||||
|
} else {
|
||||||
|
LOG(ERROR) << StringPrintf(
|
||||||
|
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
|
||||||
|
":\n\n"
|
||||||
|
"It was not recorded where the memory was freed.\n\n"
|
||||||
|
"The dangling raw_ptr was released at:\n%s",
|
||||||
|
id, stack_trace_release.ToString().c_str());
|
||||||
|
}
|
||||||
|
IMMEDIATE_CRASH();
|
||||||
|
}
|
||||||
|
|
||||||
|
void ClearDanglingRawPtrBuffer() {
|
||||||
|
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
|
||||||
|
g_stack_trace_buffer = DanglingRawPtrBuffer();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void InstallDanglingRawPtrChecks() {
|
||||||
|
// Clearing storage is useful for running multiple unit tests without
|
||||||
|
// restarting the test executable.
|
||||||
|
ClearDanglingRawPtrBuffer();
|
||||||
|
|
||||||
|
if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
|
||||||
|
partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
|
||||||
|
partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (features::kDanglingPtrModeParam.Get()) {
|
||||||
|
case features::DanglingPtrMode::kCrash:
|
||||||
|
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
|
||||||
|
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case features::DanglingPtrMode::kLogSignature:
|
||||||
|
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
|
||||||
|
partition_alloc::SetDanglingRawPtrReleasedFn(
|
||||||
|
DanglingRawPtrReleasedLogSignature);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there
|
||||||
|
// is a dangling pointer, we should crash at some point. Consider providing an
|
||||||
|
// API to periodically check the buffer.
|
||||||
|
|
||||||
|
#else // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
|
||||||
|
void InstallDanglingRawPtrChecks() {}
|
||||||
|
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
42
src/base/allocator/partition_alloc_support.h
Normal file
42
src/base/allocator/partition_alloc_support.h
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/base_export.h"
|
||||||
|
#include "base/memory/scoped_refptr.h"
|
||||||
|
#include "base/task/sequenced_task_runner.h"
|
||||||
|
|
||||||
|
namespace base {
|
||||||
|
namespace allocator {
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
BASE_EXPORT void RegisterPCScanStatsReporter();
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Starts a periodic timer on the current thread to purge all thread caches.
|
||||||
|
BASE_EXPORT void StartThreadCachePeriodicPurge();
|
||||||
|
|
||||||
|
BASE_EXPORT void StartMemoryReclaimer(
|
||||||
|
scoped_refptr<SequencedTaskRunner> task_runner);
|
||||||
|
|
||||||
|
BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
|
||||||
|
|
||||||
|
// Install handlers for when dangling raw_ptr(s) have been detected. This prints
|
||||||
|
// two StackTraces. One where the memory is freed, one where the last dangling
|
||||||
|
// raw_ptr stopped referencing it.
|
||||||
|
//
|
||||||
|
// This is currently effective, only when compiled with
|
||||||
|
// `enable_dangling_raw_ptr_checks` build flag.
|
||||||
|
BASE_EXPORT void InstallDanglingRawPtrChecks();
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
|
417
src/base/allocator/partition_allocator/BUILD.gn
Normal file
417
src/base/allocator/partition_allocator/BUILD.gn
Normal file
@ -0,0 +1,417 @@
|
|||||||
|
# Copyright (c) 2022 The Chromium Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
|
# found in the LICENSE file.
|
||||||
|
|
||||||
|
import("//base/allocator/allocator.gni")
|
||||||
|
import("//base/allocator/partition_allocator/partition_alloc.gni")
|
||||||
|
import("//build/buildflag_header.gni")
|
||||||
|
import("//build/config/chromecast_build.gni")
|
||||||
|
import("//build/config/chromeos/ui_mode.gni")
|
||||||
|
import("//build/config/dcheck_always_on.gni")
|
||||||
|
import("//build/config/logging.gni")
|
||||||
|
|
||||||
|
# Add partition_alloc.gni and import it for partition_alloc configs.
|
||||||
|
|
||||||
|
config("partition_alloc_implementation") {
|
||||||
|
# See also: `partition_alloc_base/component_export.h`
|
||||||
|
defines = [ "IS_PARTITION_ALLOC_IMPL" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
config("memory_tagging") {
|
||||||
|
if (current_cpu == "arm64" && is_clang &&
|
||||||
|
(is_linux || is_chromeos || is_android || is_fuchsia)) {
|
||||||
|
# base/ has access to the MTE intrinsics because it needs to use them,
|
||||||
|
# but they're not backwards compatible. Use base::CPU::has_mte()
|
||||||
|
# beforehand to confirm or use indirect functions (ifuncs) to select
|
||||||
|
# an MTE-specific implementation at dynamic link-time.
|
||||||
|
cflags = [
|
||||||
|
"-Xclang",
|
||||||
|
"-target-feature",
|
||||||
|
"-Xclang",
|
||||||
|
"+mte",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_fuchsia) {
|
||||||
|
config("fuchsia_sync_lib") {
|
||||||
|
libs = [
|
||||||
|
"sync", # Used by spinning_mutex.h.
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (make_partition_alloc_standalone) {
|
||||||
|
partition_alloc_target_type = "component"
|
||||||
|
} else {
|
||||||
|
if (is_component_build) {
|
||||||
|
partition_alloc_target_type = "source_set"
|
||||||
|
} else {
|
||||||
|
partition_alloc_target_type = "static_library"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
target(partition_alloc_target_type, "partition_alloc") {
|
||||||
|
sources = [
|
||||||
|
"address_pool_manager.cc",
|
||||||
|
"address_pool_manager.h",
|
||||||
|
"address_pool_manager_bitmap.cc",
|
||||||
|
"address_pool_manager_bitmap.h",
|
||||||
|
"address_pool_manager_types.h",
|
||||||
|
"address_space_randomization.cc",
|
||||||
|
"address_space_randomization.h",
|
||||||
|
"address_space_stats.h",
|
||||||
|
"allocation_guard.cc",
|
||||||
|
"allocation_guard.h",
|
||||||
|
"dangling_raw_ptr_checks.cc",
|
||||||
|
"dangling_raw_ptr_checks.h",
|
||||||
|
"memory_reclaimer.cc",
|
||||||
|
"memory_reclaimer.h",
|
||||||
|
"oom.cc",
|
||||||
|
"oom.h",
|
||||||
|
"oom_callback.cc",
|
||||||
|
"oom_callback.h",
|
||||||
|
"page_allocator.cc",
|
||||||
|
"page_allocator.h",
|
||||||
|
"page_allocator_constants.h",
|
||||||
|
"page_allocator_internal.h",
|
||||||
|
"partition_address_space.cc",
|
||||||
|
"partition_address_space.h",
|
||||||
|
"partition_alloc-inl.h",
|
||||||
|
"partition_alloc.cc",
|
||||||
|
"partition_alloc.h",
|
||||||
|
"partition_alloc_base/atomic_ref_count.h",
|
||||||
|
"partition_alloc_base/bit_cast.h",
|
||||||
|
"partition_alloc_base/bits.h",
|
||||||
|
"partition_alloc_base/check.cc",
|
||||||
|
"partition_alloc_base/check.h",
|
||||||
|
"partition_alloc_base/compiler_specific.h",
|
||||||
|
"partition_alloc_base/component_export.h",
|
||||||
|
"partition_alloc_base/cpu.cc",
|
||||||
|
"partition_alloc_base/cpu.h",
|
||||||
|
"partition_alloc_base/cxx17_backports.h",
|
||||||
|
"partition_alloc_base/debug/alias.cc",
|
||||||
|
"partition_alloc_base/debug/alias.h",
|
||||||
|
"partition_alloc_base/gtest_prod_util.h",
|
||||||
|
"partition_alloc_base/immediate_crash.h",
|
||||||
|
"partition_alloc_base/logging.cc",
|
||||||
|
"partition_alloc_base/logging.h",
|
||||||
|
"partition_alloc_base/memory/ref_counted.cc",
|
||||||
|
"partition_alloc_base/memory/ref_counted.h",
|
||||||
|
"partition_alloc_base/memory/scoped_policy.h",
|
||||||
|
"partition_alloc_base/memory/scoped_refptr.h",
|
||||||
|
"partition_alloc_base/migration_adapter.h",
|
||||||
|
"partition_alloc_base/no_destructor.h",
|
||||||
|
"partition_alloc_base/numerics/checked_math.h",
|
||||||
|
"partition_alloc_base/numerics/checked_math_impl.h",
|
||||||
|
"partition_alloc_base/numerics/clamped_math.h",
|
||||||
|
"partition_alloc_base/numerics/clamped_math_impl.h",
|
||||||
|
"partition_alloc_base/numerics/math_constants.h",
|
||||||
|
"partition_alloc_base/numerics/ostream_operators.h",
|
||||||
|
"partition_alloc_base/numerics/ranges.h",
|
||||||
|
"partition_alloc_base/numerics/safe_conversions.h",
|
||||||
|
"partition_alloc_base/numerics/safe_conversions_arm_impl.h",
|
||||||
|
"partition_alloc_base/numerics/safe_conversions_impl.h",
|
||||||
|
"partition_alloc_base/numerics/safe_math.h",
|
||||||
|
"partition_alloc_base/numerics/safe_math_arm_impl.h",
|
||||||
|
"partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
|
||||||
|
"partition_alloc_base/numerics/safe_math_shared_impl.h",
|
||||||
|
"partition_alloc_base/posix/eintr_wrapper.h",
|
||||||
|
"partition_alloc_base/rand_util.cc",
|
||||||
|
"partition_alloc_base/rand_util.h",
|
||||||
|
"partition_alloc_base/scoped_clear_last_error.h",
|
||||||
|
"partition_alloc_base/strings/stringprintf.cc",
|
||||||
|
"partition_alloc_base/strings/stringprintf.h",
|
||||||
|
"partition_alloc_base/sys_byteorder.h",
|
||||||
|
"partition_alloc_base/thread_annotations.h",
|
||||||
|
"partition_alloc_base/threading/platform_thread.cc",
|
||||||
|
"partition_alloc_base/threading/platform_thread.h",
|
||||||
|
"partition_alloc_base/threading/platform_thread_ref.h",
|
||||||
|
"partition_alloc_base/time/time.cc",
|
||||||
|
"partition_alloc_base/time/time.h",
|
||||||
|
"partition_alloc_base/time/time_override.cc",
|
||||||
|
"partition_alloc_base/time/time_override.h",
|
||||||
|
"partition_alloc_base/types/strong_alias.h",
|
||||||
|
"partition_alloc_base/win/windows_types.h",
|
||||||
|
"partition_alloc_check.h",
|
||||||
|
"partition_alloc_config.h",
|
||||||
|
"partition_alloc_constants.h",
|
||||||
|
"partition_alloc_forward.h",
|
||||||
|
"partition_alloc_hooks.cc",
|
||||||
|
"partition_alloc_hooks.h",
|
||||||
|
"partition_alloc_notreached.h",
|
||||||
|
"partition_bucket.cc",
|
||||||
|
"partition_bucket.h",
|
||||||
|
"partition_bucket_lookup.h",
|
||||||
|
"partition_cookie.h",
|
||||||
|
"partition_direct_map_extent.h",
|
||||||
|
"partition_freelist_entry.h",
|
||||||
|
"partition_lock.h",
|
||||||
|
"partition_oom.cc",
|
||||||
|
"partition_oom.h",
|
||||||
|
"partition_page.cc",
|
||||||
|
"partition_page.h",
|
||||||
|
"partition_ref_count.h",
|
||||||
|
"partition_root.cc",
|
||||||
|
"partition_root.h",
|
||||||
|
"partition_stats.cc",
|
||||||
|
"partition_stats.h",
|
||||||
|
"partition_tag.h",
|
||||||
|
"partition_tag_bitmap.h",
|
||||||
|
"partition_tag_types.h",
|
||||||
|
"partition_tls.h",
|
||||||
|
"random.cc",
|
||||||
|
"random.h",
|
||||||
|
"reservation_offset_table.cc",
|
||||||
|
"reservation_offset_table.h",
|
||||||
|
"spinning_mutex.cc",
|
||||||
|
"spinning_mutex.h",
|
||||||
|
"starscan/logging.h",
|
||||||
|
"starscan/metadata_allocator.cc",
|
||||||
|
"starscan/metadata_allocator.h",
|
||||||
|
"starscan/pcscan.cc",
|
||||||
|
"starscan/pcscan.h",
|
||||||
|
"starscan/pcscan_internal.cc",
|
||||||
|
"starscan/pcscan_internal.h",
|
||||||
|
"starscan/pcscan_scheduling.cc",
|
||||||
|
"starscan/pcscan_scheduling.h",
|
||||||
|
"starscan/raceful_worklist.h",
|
||||||
|
"starscan/scan_loop.h",
|
||||||
|
"starscan/snapshot.cc",
|
||||||
|
"starscan/snapshot.h",
|
||||||
|
"starscan/stack/stack.cc",
|
||||||
|
"starscan/stack/stack.h",
|
||||||
|
"starscan/starscan_fwd.h",
|
||||||
|
"starscan/state_bitmap.h",
|
||||||
|
"starscan/stats_collector.cc",
|
||||||
|
"starscan/stats_collector.h",
|
||||||
|
"starscan/stats_reporter.h",
|
||||||
|
"starscan/write_protector.cc",
|
||||||
|
"starscan/write_protector.h",
|
||||||
|
"tagging.cc",
|
||||||
|
"tagging.h",
|
||||||
|
"thread_cache.cc",
|
||||||
|
"thread_cache.h",
|
||||||
|
"yield_processor.h",
|
||||||
|
]
|
||||||
|
defines = []
|
||||||
|
if (is_win) {
|
||||||
|
sources += [
|
||||||
|
"page_allocator_internals_win.h",
|
||||||
|
"partition_alloc_base/rand_util_win.cc",
|
||||||
|
"partition_alloc_base/scoped_clear_last_error_win.cc",
|
||||||
|
"partition_alloc_base/threading/platform_thread_win.cc",
|
||||||
|
"partition_alloc_base/time/time_win.cc",
|
||||||
|
"partition_tls_win.cc",
|
||||||
|
]
|
||||||
|
} else if (is_posix) {
|
||||||
|
sources += [
|
||||||
|
"page_allocator_internals_posix.cc",
|
||||||
|
"page_allocator_internals_posix.h",
|
||||||
|
"partition_alloc_base/files/file_util.h",
|
||||||
|
"partition_alloc_base/files/file_util_posix.cc",
|
||||||
|
"partition_alloc_base/posix/safe_strerror.cc",
|
||||||
|
"partition_alloc_base/posix/safe_strerror.h",
|
||||||
|
"partition_alloc_base/rand_util_posix.cc",
|
||||||
|
"partition_alloc_base/threading/platform_thread_internal_posix.h",
|
||||||
|
"partition_alloc_base/threading/platform_thread_posix.cc",
|
||||||
|
"partition_alloc_base/time/time_conversion_posix.cc",
|
||||||
|
]
|
||||||
|
|
||||||
|
if (is_android || is_chromeos_ash) {
|
||||||
|
sources += [ "partition_alloc_base/time/time_android.cc" ]
|
||||||
|
}
|
||||||
|
if (is_apple) {
|
||||||
|
sources += [ "partition_alloc_base/time/time_mac.mm" ]
|
||||||
|
} else {
|
||||||
|
sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
|
||||||
|
}
|
||||||
|
} else if (is_fuchsia) {
|
||||||
|
sources += [
|
||||||
|
"page_allocator_internals_fuchsia.h",
|
||||||
|
"partition_alloc_base/fuchsia/fuchsia_logging.cc",
|
||||||
|
"partition_alloc_base/fuchsia/fuchsia_logging.h",
|
||||||
|
"partition_alloc_base/posix/safe_strerror.cc",
|
||||||
|
"partition_alloc_base/posix/safe_strerror.h",
|
||||||
|
"partition_alloc_base/rand_util_fuchsia.cc",
|
||||||
|
"partition_alloc_base/threading/platform_thread_internal_posix.h",
|
||||||
|
"partition_alloc_base/threading/platform_thread_posix.cc",
|
||||||
|
"partition_alloc_base/time/time_conversion_posix.cc",
|
||||||
|
"partition_alloc_base/time/time_fuchsia.cc",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
if (is_android) {
|
||||||
|
# Only android build requires native_library, and native_library depends
|
||||||
|
# on file_path. So file_path is added if is_android = true.
|
||||||
|
sources += [
|
||||||
|
"partition_alloc_base/files/file_path.cc",
|
||||||
|
"partition_alloc_base/files/file_path.h",
|
||||||
|
"partition_alloc_base/native_library.cc",
|
||||||
|
"partition_alloc_base/native_library.h",
|
||||||
|
"partition_alloc_base/native_library_posix.cc",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
if (is_apple) {
|
||||||
|
# Apple-specific utilities
|
||||||
|
sources += [
|
||||||
|
"partition_alloc_base/mac/foundation_util.h",
|
||||||
|
"partition_alloc_base/mac/foundation_util.mm",
|
||||||
|
"partition_alloc_base/mac/mac_util.h",
|
||||||
|
"partition_alloc_base/mac/mac_util.mm",
|
||||||
|
"partition_alloc_base/mac/scoped_cftyperef.h",
|
||||||
|
"partition_alloc_base/mac/scoped_typeref.h",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
if (current_cpu == "x64") {
|
||||||
|
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
|
||||||
|
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
|
||||||
|
} else if (current_cpu == "x86") {
|
||||||
|
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
|
||||||
|
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
|
||||||
|
} else if (current_cpu == "arm") {
|
||||||
|
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
|
||||||
|
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
|
||||||
|
} else if (current_cpu == "arm64") {
|
||||||
|
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
|
||||||
|
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
|
||||||
|
} else {
|
||||||
|
# To support a trampoline for another arch, please refer to v8/src/heap/base.
|
||||||
|
}
|
||||||
|
public_deps = [
|
||||||
|
":chromecast_buildflags",
|
||||||
|
":chromeos_buildflags",
|
||||||
|
":debugging_buildflags",
|
||||||
|
":logging_buildflags",
|
||||||
|
":partition_alloc_buildflags",
|
||||||
|
]
|
||||||
|
|
||||||
|
configs += [
|
||||||
|
":partition_alloc_implementation",
|
||||||
|
":memory_tagging",
|
||||||
|
]
|
||||||
|
deps = []
|
||||||
|
public_configs = []
|
||||||
|
if (is_android) {
|
||||||
|
# tagging.cc requires __arm_mte_set_* functions.
|
||||||
|
deps += [ "//third_party/android_ndk:cpu_features" ]
|
||||||
|
}
|
||||||
|
if (is_fuchsia) {
|
||||||
|
public_deps += [
|
||||||
|
"//third_party/fuchsia-sdk/sdk/pkg/fit",
|
||||||
|
"//third_party/fuchsia-sdk/sdk/pkg/sync",
|
||||||
|
"//third_party/fuchsia-sdk/sdk/pkg/zx",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Needed for users of spinning_mutex.h, which for performance reasons,
|
||||||
|
# contains inlined calls to `libsync` inside the header file.
|
||||||
|
# It appends an entry to the "libs" section of the dependent target.
|
||||||
|
public_configs += [ ":fuchsia_sync_lib" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
frameworks = []
|
||||||
|
if (is_mac) {
|
||||||
|
# SecTaskGetCodeSignStatus needs:
|
||||||
|
frameworks += [ "Security.framework" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
configs += [ "//build/config/compiler:wexit_time_destructors" ]
|
||||||
|
|
||||||
|
# Partition alloc is relatively hot (>1% of cycles for users of CrOS). Use speed-focused
|
||||||
|
# optimizations for it.
|
||||||
|
if (!is_debug) {
|
||||||
|
configs -= [ "//build/config/compiler:default_optimization" ]
|
||||||
|
configs += [ "//build/config/compiler:optimize_speed" ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buildflag_header("partition_alloc_buildflags") {
|
||||||
|
header = "partition_alloc_buildflags.h"
|
||||||
|
|
||||||
|
_use_partition_alloc_as_malloc = use_allocator == "partition"
|
||||||
|
assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
|
||||||
|
"Partition alloc requires the allocator shim")
|
||||||
|
|
||||||
|
# BackupRefPtr(BRP) build flags.
|
||||||
|
_use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
|
||||||
|
_put_ref_count_in_previous_slot =
|
||||||
|
put_ref_count_in_previous_slot && _use_backup_ref_ptr
|
||||||
|
_enable_backup_ref_ptr_slow_checks =
|
||||||
|
enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
|
||||||
|
_enable_dangling_raw_ptr_checks =
|
||||||
|
enable_dangling_raw_ptr_checks && _use_backup_ref_ptr
|
||||||
|
|
||||||
|
# MTECheckedPtr is exclusive against BRP (asserted at declaration).
|
||||||
|
# MTECheckedPtr requires 64-bit pointers (not available in NaCl).
|
||||||
|
_use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl
|
||||||
|
|
||||||
|
_record_alloc_info = false
|
||||||
|
|
||||||
|
# TODO(crbug.com/1151236): Need to refactor the following buildflags.
|
||||||
|
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
|
||||||
|
# partition alloc. For partition alloc,
|
||||||
|
# gen/base/allocator/partition_allocator/partition_alloc_buildflags.h
|
||||||
|
# defines and partition alloc includes the header file. For chrome,
|
||||||
|
# gen/base/allocator/buildflags.h defines and chrome includes.
|
||||||
|
flags = [
|
||||||
|
"USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",
|
||||||
|
|
||||||
|
"USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
|
||||||
|
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
|
||||||
|
"ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
|
||||||
|
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
|
||||||
|
|
||||||
|
"USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
|
||||||
|
|
||||||
|
"RECORD_ALLOC_INFO=$_record_alloc_info",
|
||||||
|
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
buildflag_header("chromecast_buildflags") {
|
||||||
|
header = "chromecast_buildflags.h"
|
||||||
|
|
||||||
|
flags = [
|
||||||
|
"PA_IS_CAST_ANDROID=$is_cast_android",
|
||||||
|
"PA_IS_CASTOS=$is_castos",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
buildflag_header("chromeos_buildflags") {
|
||||||
|
header = "chromeos_buildflags.h"
|
||||||
|
|
||||||
|
flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
buildflag_header("logging_buildflags") {
|
||||||
|
header = "logging_buildflags.h"
|
||||||
|
|
||||||
|
flags = [ "PA_ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
buildflag_header("debugging_buildflags") {
|
||||||
|
header = "debugging_buildflags.h"
|
||||||
|
header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"
|
||||||
|
|
||||||
|
# Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
|
||||||
|
# but avails it as a buildflag.
|
||||||
|
_dcheck_is_on = is_debug || dcheck_always_on
|
||||||
|
|
||||||
|
flags = [
|
||||||
|
"PA_DCHECK_IS_ON=$_dcheck_is_on",
|
||||||
|
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
|
||||||
|
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
group("buildflags") {
|
||||||
|
public_deps = [
|
||||||
|
":chromecast_buildflags",
|
||||||
|
":chromeos_buildflags",
|
||||||
|
":debugging_buildflags",
|
||||||
|
":logging_buildflags",
|
||||||
|
":partition_alloc_buildflags",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
|
||||||
|
# move test code here. i.e. test("partition_alloc_tests") { ... } and
|
||||||
|
# test("partition_alloc_perftests").
|
30
src/base/allocator/partition_allocator/DEPS
Normal file
30
src/base/allocator/partition_allocator/DEPS
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
# PartitionAlloc is planned to be extracted into a standalone library, and
|
||||||
|
# therefore dependencies need to be strictly controlled and minimized.
|
||||||
|
|
||||||
|
noparent = True
|
||||||
|
|
||||||
|
include_rules = [
|
||||||
|
"+build/build_config.h",
|
||||||
|
"+build/buildflag.h",
|
||||||
|
"+third_party/lss/linux_syscall_support.h",
|
||||||
|
]
|
||||||
|
|
||||||
|
specific_include_rules = {
|
||||||
|
".*_(perf|unit)test\.cc$": [
|
||||||
|
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
|
||||||
|
"+base/debug/proc_maps_linux.h",
|
||||||
|
"+base/system/sys_info.h",
|
||||||
|
"+base/test/gtest_util.h",
|
||||||
|
"+base/timer/lap_timer.h",
|
||||||
|
"+base/win/windows_version.h",
|
||||||
|
"+testing/gmock/include/gmock/gmock.h",
|
||||||
|
"+testing/gtest/include/gtest/gtest.h",
|
||||||
|
"+testing/perf/perf_result_reporter.h",
|
||||||
|
],
|
||||||
|
"extended_api\.cc$": [
|
||||||
|
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
|
||||||
|
],
|
||||||
|
"gtest_prod_util\.h$": [
|
||||||
|
"+testing/gtest/include/gtest/gtest_prod.h",
|
||||||
|
],
|
||||||
|
}
|
6
src/base/allocator/partition_allocator/DIR_METADATA
Normal file
6
src/base/allocator/partition_allocator/DIR_METADATA
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
monorail {
|
||||||
|
component: "Blink>MemoryAllocator>Partition"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Also security-dev@chromium.org
|
||||||
|
team_email: "platform-architecture-dev@chromium.org"
|
4
src/base/allocator/partition_allocator/OWNERS
Normal file
4
src/base/allocator/partition_allocator/OWNERS
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
bartekn@chromium.org
|
||||||
|
haraken@chromium.org
|
||||||
|
lizeb@chromium.org
|
||||||
|
tasak@google.com
|
193
src/base/allocator/partition_allocator/PartitionAlloc.md
Normal file
193
src/base/allocator/partition_allocator/PartitionAlloc.md
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
# PartitionAlloc Design
|
||||||
|
|
||||||
|
This document describes PartitionAlloc at a high level, with some architectural
|
||||||
|
details. For implementation details, see the comments in
|
||||||
|
`partition_alloc_constants.h`.
|
||||||
|
|
||||||
|
## Quick Links
|
||||||
|
|
||||||
|
* [Glossary](./glossary.md): Definitions of terms commonly used in
|
||||||
|
PartitionAlloc. The present document largely avoids defining terms.
|
||||||
|
|
||||||
|
* [Build Config](./build_config.md): Pertinent GN args, buildflags, and
|
||||||
|
macros.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
PartitionAlloc is a memory allocator optimized for space efficiency,
|
||||||
|
allocation latency, and security.
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
|
||||||
|
PartitionAlloc is designed to be extremely fast in its fast paths. The fast
|
||||||
|
paths of allocation and deallocation require very few (reasonably predictable)
|
||||||
|
branches. The number of operations in the fast paths is minimal, leading to the
|
||||||
|
possibility of inlining.
|
||||||
|
|
||||||
|
![general architecture](./dot/layers.png)
|
||||||
|
|
||||||
|
However, even the fast path isn't the fastest, because it requires taking
|
||||||
|
a per-partition lock. Although we optimized the lock, there was still room for
|
||||||
|
improvement; to this end, we introduced the thread cache.
|
||||||
|
The thread cache has been tailored to satisfy a vast majority of requests by
|
||||||
|
allocating from and releasing memory to the main allocator in batches,
|
||||||
|
amortizing lock acquisition and further improving locality while not trapping
|
||||||
|
excess memory.
|
||||||
|
|
||||||
|
### Security
|
||||||
|
|
||||||
|
Security is one of the important goals of PartitionAlloc.
|
||||||
|
|
||||||
|
PartitionAlloc guarantees that different partitions exist in different regions
|
||||||
|
of the process's address space. When the caller has freed all objects contained
|
||||||
|
in a page in a partition, PartitionAlloc returns the physical memory to the
|
||||||
|
operating system, but continues to reserve the region of address space.
|
||||||
|
PartitionAlloc will only reuse an address space region for the same partition.
|
||||||
|
|
||||||
|
Similarly, one page can contain only objects from the same bucket.
|
||||||
|
When freed, PartitionAlloc returns the physical memory, but continues to reserve
|
||||||
|
the region for this very bucket.
|
||||||
|
|
||||||
|
The above techniques help avoid type confusion attacks. Note, however, these
|
||||||
|
apply only to normal buckets and not to direct map, as it'd waste too much
|
||||||
|
address space.
|
||||||
|
|
||||||
|
PartitionAlloc also guarantees that:
|
||||||
|
|
||||||
|
* Linear overflows/underflows cannot corrupt into, out of, or between
|
||||||
|
partitions. There are guard pages at the beginning and the end of each memory
|
||||||
|
region owned by a partition.
|
||||||
|
|
||||||
|
* Linear overflows/underflows cannot corrupt the allocation metadata.
|
||||||
|
PartitionAlloc records metadata in a dedicated, out-of-line region (not
|
||||||
|
adjacent to objects), surrounded by guard pages. (Freelist pointers are an
|
||||||
|
exception.)
|
||||||
|
|
||||||
|
* Partial pointer overwrite of freelist pointer should fault.
|
||||||
|
|
||||||
|
* Direct map allocations have guard pages at the beginning and the end.
|
||||||
|
|
||||||
|
### Alignment
|
||||||
|
|
||||||
|
PartitionAlloc guarantees that returned pointers are aligned on
|
||||||
|
`partition_alloc::internal::kAlignment` boundary (typically 16B on
|
||||||
|
64-bit systems, and 8B on 32-bit).
|
||||||
|
|
||||||
|
PartitionAlloc also supports higher levels of alignment, that can be requested
|
||||||
|
via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
|
||||||
|
`posix_memalign()`). The requested
|
||||||
|
alignment has to be a power of two. PartitionAlloc reserves the right to round
|
||||||
|
up the requested size to the nearest power of two, greater than or equal to the
|
||||||
|
requested alignment. This may be wasteful, but allows taking advantage of
|
||||||
|
natural PartitionAlloc alignment guarantees. Allocations with an alignment
|
||||||
|
requirement greater than `partition_alloc::internal::kAlignment` are expected
|
||||||
|
to be very rare.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Layout in Memory
|
||||||
|
|
||||||
|
PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
|
||||||
|
pages. Each super page is split into partition pages.
|
||||||
|
The first and the last partition page are permanently inaccessible and serve
|
||||||
|
as guard pages, with the exception of one system page in the middle of the first
|
||||||
|
partition page that holds metadata (32B struct per partition page).
|
||||||
|
|
||||||
|
![anatomy of a super page](./dot/super-page.png)
|
||||||
|
|
||||||
|
* The slot span numbers provide a visual hint of their size (in partition
|
||||||
|
pages).
|
||||||
|
* Colors provide a visual hint of the bucket to which the slot span belongs.
|
||||||
|
* Although only five colors are shown, in reality, a super page holds
|
||||||
|
tens of slot spans, some of which belong to the same bucket.
|
||||||
|
* The system page that holds metadata tracks each partition page with one 32B
|
||||||
|
[`PartitionPage` struct][PartitionPage], which is either
|
||||||
|
* a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
|
||||||
|
* a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
|
||||||
|
diagram).
|
||||||
|
* Gray fill denotes guard pages (one partition page each at the head and tail
|
||||||
|
of each super page).
|
||||||
|
* In some configurations, PartitionAlloc stores more metadata than can
|
||||||
|
fit in the one system page at the front. These are the bitmaps for
|
||||||
|
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
|
||||||
|
what would otherwise be usable space for slot spans. One, both, or
|
||||||
|
none of these bitmaps may be present, depending on build
|
||||||
|
configuration, runtime configuration, and type of allocation.
|
||||||
|
See [`SuperPagePayloadBegin()`][payload-start] for details.
|
||||||
|
|
||||||
|
As allocation requests arrive, there is eventually a need to allocate a new slot
|
||||||
|
span.
|
||||||
|
Address space for such a slot span is carved out from the last super page. If
|
||||||
|
not enough space, a new super page is allocated. Due to varying sizes of slot
|
||||||
|
span, this may lead to leaving space unused (we never go back to fill previous
|
||||||
|
super pages), which is fine because this memory is merely reserved, which is far
|
||||||
|
less precious than committed memory. Note also that address space reserved for a
|
||||||
|
slot span is never released, even if the slot span isn't used for a long time.
|
||||||
|
|
||||||
|
All slots in a newly allocated slot span are *free*, i.e. available for
|
||||||
|
allocation.
|
||||||
|
|
||||||
|
### Freelist Pointers
|
||||||
|
|
||||||
|
All free slots within a slot span are chained into a singly-linked free-list,
|
||||||
|
by writing the *next* pointer at the beginning of each slot, and the head of the
|
||||||
|
list is written in the metadata struct.
|
||||||
|
|
||||||
|
However, writing a pointer in each free slot of a newly allocated span would
|
||||||
|
require committing and faulting in physical pages upfront, which would be
|
||||||
|
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
|
||||||
|
Only provisioned slots are chained into the freelist.
|
||||||
|
Once provisioned slots in a span are depleted, then another page worth of slots
|
||||||
|
is provisioned (note, a slot that crosses a page boundary only gets
|
||||||
|
provisioned with slots of the next page). See
|
||||||
|
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.
|
||||||
|
|
||||||
|
Freelist pointers are stored at the beginning of each free slot. As such, they
|
||||||
|
are the only metadata that is inline, i.e. stored among the
|
||||||
|
objects. This makes them prone to overruns. On little-endian systems, the
|
||||||
|
pointers are encoded by reversing byte order, so that partial overruns will very
|
||||||
|
likely result in destroying the pointer, as opposed to forming a valid pointer
|
||||||
|
to a nearby location.
|
||||||
|
|
||||||
|
Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a
|
||||||
|
different manner. This helps PartitionAlloc detect corruptions.
|
||||||
|
|
||||||
|
### Slot Span States
|
||||||
|
|
||||||
|
A slot span can be in any of 4 states:
|
||||||
|
* *Full*. A full span has no free slots.
|
||||||
|
* *Empty*. An empty span has no allocated slots, only free slots.
|
||||||
|
* *Active*. An active span is anything in between the above two.
|
||||||
|
* *Decommitted*. A decommitted span is a special case of an empty span, where
|
||||||
|
all pages are decommitted from memory.
|
||||||
|
|
||||||
|
PartitionAlloc prioritizes getting an available slot from an active span, over
|
||||||
|
an empty one, in hope that the latter can be soon transitioned into a
|
||||||
|
decommitted state, thus releasing memory. There is no mechanism, however, to
|
||||||
|
prioritize selection of a slot span based on the number of already allocated
|
||||||
|
slots.
|
||||||
|
|
||||||
|
An empty span becomes decommitted either when there are too many empty spans
|
||||||
|
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in
|
||||||
|
low memory pressure conditions). An allocation can be satisfied from
|
||||||
|
a decommitted span if there are no active or empty spans available. The slot
|
||||||
|
provisioning mechanism kicks back in, committing the pages gradually as needed,
|
||||||
|
and the span becomes active. (There is currently no other way
|
||||||
|
to unprovision slots than decommitting the entire span).
|
||||||
|
|
||||||
|
As mentioned above, a bucket is a collection of slot spans containing slots of
|
||||||
|
the same size. In fact, each bucket has 3 linked-lists, chaining active, empty
|
||||||
|
and decommitted spans (see `PartitionBucket::*_slot_spans_head`).
|
||||||
|
There is no need for a full span list. The lists are updated lazily. An empty,
|
||||||
|
decommitted or full span may stay on the active list for some time, until
|
||||||
|
`PartitionBucket::SetNewActiveSlotSpan()` encounters it.
|
||||||
|
A decommitted span may stay on the empty list for some time,
|
||||||
|
until `PartitionBucket<thread_safe>::SlowPathAlloc()` encounters it. However,
|
||||||
|
the inaccuracy can't happen in the other direction, i.e. an active span can only
|
||||||
|
be on the active list, and an empty span can only be on the active or empty
|
||||||
|
list.
|
||||||
|
|
||||||
|
[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
|
||||||
|
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
|
||||||
|
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
|
||||||
|
[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454
|
541
src/base/allocator/partition_allocator/address_pool_manager.cc
Normal file
541
src/base/allocator/partition_allocator/address_pool_manager.cc
Normal file
@ -0,0 +1,541 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_pool_manager.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_space_stats.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
|
||||||
|
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#include <sys/mman.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
// The single process-wide instance handed out by GetInstance().
AddressPoolManager AddressPoolManager::singleton_;
|
||||||
|
|
||||||
|
// static
|
||||||
|
// Returns the process-wide AddressPoolManager instance.
AddressPoolManager& AddressPoolManager::GetInstance() {
  return singleton_;
}
|
||||||
|
|
||||||
|
#if defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// This will crash if the range cannot be decommitted.
|
||||||
|
// Decommits [address, address + size), crashing if the range cannot be
// decommitted. Uses DecommitAndZeroSystemPages() because callers rely on
// re-committed pages reading back as zero, which plain DecommitSystemPages()
// does not guarantee on every OS (notably macOS).
void DecommitPages(uintptr_t address, size_t size) {
  DecommitAndZeroSystemPages(address, size);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Registers the super-page-aligned region [ptr, ptr + length) in the first
// free pool slot and returns its 1-based handle. Crashes if every slot is
// already occupied.
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));

  for (pool_handle slot = 0; slot < std::size(pools_); ++slot) {
    if (pools_[slot].IsInitialized())
      continue;
    pools_[slot].Initialize(ptr, length);
    // Handles are 1-based; 0 is reserved as "invalid".
    return slot + 1;
  }
  PA_NOTREACHED();
  return 0;
}
|
||||||
|
|
||||||
|
// Copies the allocation bitset of the pool behind |handle| into |used|.
// |used| is left untouched when the pool cannot be resolved.
void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPagesInPool>& used) {
  Pool* pool = GetPool(handle);
  if (!pool)
    return;

  pool->GetUsedSuperPages(used);
}
|
||||||
|
|
||||||
|
// Returns the base address of the pool behind |handle|, or 0 when the pool
// cannot be resolved.
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  return pool ? pool->GetBaseAddress() : 0;
}
|
||||||
|
|
||||||
|
void AddressPoolManager::ResetForTesting() {
|
||||||
|
for (pool_handle i = 0; i < std::size(pools_); ++i)
|
||||||
|
pools_[i].Reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unregisters the pool behind |handle|. The pool must currently be
// initialized.
void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}
|
||||||
|
|
||||||
|
// Reserves |length| bytes from the pool behind |handle|. When
// |requested_address| is non-zero, that exact range is tried first; if it is
// zero or unavailable, falls back to a first-fit search. Returns 0 when the
// pool is exhausted.
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  Pool* pool = GetPool(handle);
  if (requested_address && pool->TryReserveChunk(requested_address, length))
    return requested_address;
  return pool->FindChunk(length);
}
|
||||||
|
|
||||||
|
// Decommits the underlying pages of [address, address + length) and returns
// the range to the pool behind |handle|.
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(0 < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  // Decommit before releasing the range so it is never simultaneously
  // committed and marked free in the pool's bookkeeping.
  DecommitPages(address, length);
  pool->FreeChunk(address, length);
}
|
||||||
|
|
||||||
|
// Takes ownership of the super-page-aligned region [ptr, ptr + length) and
// marks every super page in it as free.
void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  // One bit per super page.
  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);

  ScopedGuard scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}
|
||||||
|
|
||||||
|
bool AddressPoolManager::Pool::IsInitialized() {
|
||||||
|
return address_begin_ != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marks this slot uninitialized so a later Add() can reuse it.
void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::GetUsedSuperPages(
|
||||||
|
std::bitset<kMaxSuperPagesInPool>& used) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(IsInitialized());
|
||||||
|
used = alloc_bitset_;
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
|
||||||
|
PA_DCHECK(IsInitialized());
|
||||||
|
return address_begin_;
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
|
||||||
|
const size_t need_bits = requested_size >> kSuperPageShift;
|
||||||
|
|
||||||
|
// Use first-fit policy to find an available chunk from free chunks. Start
|
||||||
|
// from |bit_hint_|, because we know there are no free chunks before.
|
||||||
|
size_t beg_bit = bit_hint_;
|
||||||
|
size_t curr_bit = bit_hint_;
|
||||||
|
while (true) {
|
||||||
|
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
|
||||||
|
// |total_bits_|, return |nullptr| to signal no free chunk was found.
|
||||||
|
size_t end_bit = beg_bit + need_bits;
|
||||||
|
if (end_bit > total_bits_)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
bool found = true;
|
||||||
|
for (; curr_bit < end_bit; ++curr_bit) {
|
||||||
|
if (alloc_bitset_.test(curr_bit)) {
|
||||||
|
// The bit was set, so this chunk isn't entirely free. Set |found=false|
|
||||||
|
// to ensure the outer loop continues. However, continue the inner loop
|
||||||
|
// to set |beg_bit| just past the last set bit in the investigated
|
||||||
|
// chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
|
||||||
|
// next outer loop pass from checking the same bits.
|
||||||
|
beg_bit = curr_bit + 1;
|
||||||
|
found = false;
|
||||||
|
if (bit_hint_ == curr_bit)
|
||||||
|
++bit_hint_;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
|
||||||
|
// mark as allocated) and return the allocated address.
|
||||||
|
if (found) {
|
||||||
|
for (size_t i = beg_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(!alloc_bitset_.test(i));
|
||||||
|
alloc_bitset_.set(i);
|
||||||
|
}
|
||||||
|
if (bit_hint_ == beg_bit) {
|
||||||
|
bit_hint_ = end_bit;
|
||||||
|
}
|
||||||
|
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
PA_DCHECK(address + requested_size <= address_end_);
|
||||||
|
#endif
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_NOTREACHED();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
|
||||||
|
size_t requested_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
PA_DCHECK(!(address & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
|
||||||
|
const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
|
||||||
|
const size_t need_bits = requested_size / kSuperPageSize;
|
||||||
|
const size_t end_bit = begin_bit + need_bits;
|
||||||
|
// Check that requested address is not too high.
|
||||||
|
if (end_bit > total_bits_)
|
||||||
|
return false;
|
||||||
|
// Check if any bit of the requested region is set already.
|
||||||
|
for (size_t i = begin_bit; i < end_bit; ++i) {
|
||||||
|
if (alloc_bitset_.test(i))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Otherwise, set the bits.
|
||||||
|
for (size_t i = begin_bit; i < end_bit; ++i) {
|
||||||
|
alloc_bitset_.set(i);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(!(address & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
|
||||||
|
|
||||||
|
PA_DCHECK(address_begin_ <= address);
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
PA_DCHECK(address + free_size <= address_end_);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
|
||||||
|
const size_t end_bit = beg_bit + free_size / kSuperPageSize;
|
||||||
|
for (size_t i = beg_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(alloc_bitset_.test(i));
|
||||||
|
alloc_bitset_.reset(i);
|
||||||
|
}
|
||||||
|
bit_hint_ = std::min(bit_hint_, beg_bit);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reports this pool's super-page usage and its largest run of free super
// pages into |stats|.
void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
  std::bitset<kMaxSuperPagesInPool> pages;
  size_t i;
  {
    // Snapshot under the lock, then do the counting without holding it.
    ScopedGuard scoped_lock(lock_);
    pages = alloc_bitset_;
    i = bit_hint_;
  }

  stats->usage = pages.count();

  size_t largest_run = 0;
  size_t current_run = 0;
  for (; i < total_bits_; ++i) {
    if (!pages[i]) {
      ++current_run;
      continue;
    }
    largest_run = std::max(largest_run, current_run);
    current_run = 0;
  }

  // The scan may end inside a free run; account for it once more.
  largest_run = std::max(largest_run, current_run);
  stats->largest_available_reservation = largest_run;
}
|
||||||
|
|
||||||
|
void AddressPoolManager::GetPoolStats(const pool_handle handle,
|
||||||
|
PoolStats* stats) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
if (!pool->IsInitialized()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
pool->GetStats(stats);
|
||||||
|
}
|
||||||
|
|
||||||
|
// (64-bit) Gathers per-pool stats for every configured pool. Always returns
// true.
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  GetPoolStats(GetRegularPool(), &stats->regular_pool_stats);
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  GetPoolStats(GetBRPPool(), &stats->brp_pool_stats);
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  // The configurable pool only exists once a caller has set one up.
  if (IsConfigurablePoolAvailable())
    GetPoolStats(GetConfigurablePool(), &stats->configurable_pool_stats);
  return true;
}
|
||||||
|
|
||||||
|
#else // defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
// Compile-time consistency checks between super-page geometry and the BRP
// bitmap granularity.
static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");
|
||||||
|
|
||||||
|
template <size_t bitsize>
|
||||||
|
void SetBitmap(std::bitset<bitsize>& bitmap,
|
||||||
|
size_t start_bit,
|
||||||
|
size_t bit_length) {
|
||||||
|
const size_t end_bit = start_bit + bit_length;
|
||||||
|
PA_DCHECK(start_bit <= bitsize);
|
||||||
|
PA_DCHECK(end_bit <= bitsize);
|
||||||
|
|
||||||
|
for (size_t i = start_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(!bitmap.test(i));
|
||||||
|
bitmap.set(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <size_t bitsize>
|
||||||
|
void ResetBitmap(std::bitset<bitsize>& bitmap,
|
||||||
|
size_t start_bit,
|
||||||
|
size_t bit_length) {
|
||||||
|
const size_t end_bit = start_bit + bit_length;
|
||||||
|
PA_DCHECK(start_bit <= bitsize);
|
||||||
|
PA_DCHECK(end_bit <= bitsize);
|
||||||
|
|
||||||
|
for (size_t i = start_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(bitmap.test(i));
|
||||||
|
bitmap.reset(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// (32-bit) Reserves |length| bytes straight from the OS; without a reserved
// cage, the pool handle does not select an address region here.
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  return AllocPages(requested_address, length, kSuperPageSize,
                    PageAccessibilityConfiguration::kInaccessible,
                    PageTag::kPartitionAlloc);
}
|
||||||
|
|
||||||
|
// (32-bit) Returns [address, address + length) straight to the OS.
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(address, length);
}
|
||||||
|
|
||||||
|
// Records [address, address + length) as belonging to the pool behind
// |handle| in the corresponding process-wide bitmap.
void AddressPoolManager::MarkUsed(pool_handle handle,
                                  uintptr_t address,
                                  size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Shrink the marked range by a PartitionPageSize()-bytes block on each
    // side, so IsManagedByBRPPoolPool() returns false for addresses inside
    // those blocks:
    //
    //         ------+---+---------------+---+----
    // memory  ..... | B | managed by PA | B | ...
    // regions ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block, used internally by the allocator
    // and never handed out to callers.
    //
    // This avoids a crash in code such as:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i ++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    // If |ptr| ended up inside B after the loop and
    // IsManagedByBRPPoolPool(ptr) returned true, ~raw_ptr<T>() would crash,
    // since the memory is not allocated by PartitionAlloc.
    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}
|
||||||
|
|
||||||
|
// Clears the bitmap bits for [address, address + length). In production only
// direct-map regions are released (normal-bucket regions never are), but
// tests exercise this with small allocations, so no
// IsManagedByDirectMap(address) DCHECK here.
void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Mirror the guard-block offsets applied when the range was marked used,
    // so exactly the bits set there are cleared here. (See the MarkUsed
    // comment in this file.)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    ResetBitmap(
        AddressPoolManagerBitmap::regular_pool_bits_,
        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}
|
||||||
|
|
||||||
|
// Test-only: clears both pool bitmaps under the bitmap lock.
void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Counts super pages in use represented by `bitmap`.
|
||||||
|
// Counts the super pages represented by `bitmap` that are in use, i.e. those
// whose group of `bits_per_super_page` consecutive bits contains at least
// one set bit.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  // Walk the bitmap one super page (i.e. one group of bits) at a time.
  for (size_t group_begin = 0; group_begin < bitsize;
       group_begin += bits_per_super_page) {
    const size_t group_end =
        std::min(group_begin + bits_per_super_page, bitsize);
    for (size_t bit = group_begin; bit < group_end; ++bit) {
      if (bitmap[bit]) {
        ++count;
        // One set bit suffices; move on to the next super page.
        break;
      }
    }
  }
  return count;
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// (32-bit) Derives super-page usage for both pools from the bitmaps, plus
// the BRP blocklist counters. Always returns true.
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    // Snapshot both bitmaps under the lock; all counting below is lock-free.
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // The output stats are sized in super pages, so the finer-grained bitmaps
  // are interpreted as super-page usage below.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Count blocklisted super pages.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed))
      stats->blocklist_size += 1;
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  return true;
}
|
||||||
|
|
||||||
|
#endif // defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
// Forwards address-space stats to |dumper| when any could be gathered.
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats))
    dumper->DumpStats(&stats);
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
179
src/base/allocator/partition_allocator/address_pool_manager.h
Normal file
179
src/base/allocator/partition_allocator/address_pool_manager.h
Normal file
@ -0,0 +1,179 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
|
||||||
|
|
||||||
|
#include <bitset>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
|
||||||
|
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_address_space.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
class AddressSpaceStatsDumper;
|
||||||
|
struct AddressSpaceStats;
|
||||||
|
struct PoolStats;
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
// (64bit version)
|
||||||
|
// AddressPoolManager takes a reserved virtual address space and manages address
|
||||||
|
// space allocation.
|
||||||
|
//
|
||||||
|
// AddressPoolManager (currently) supports up to 3 pools. Each pool manages a
|
||||||
|
// contiguous reserved address space. Alloc() takes a pool_handle and returns
|
||||||
|
// address regions from the specified pool. Free() also takes a pool_handle and
|
||||||
|
// returns the address region back to the manager.
|
||||||
|
//
|
||||||
|
// (32bit version)
|
||||||
|
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
|
||||||
|
// address regions using bitmaps. IsManagedByPartitionAllocBRPPool and
|
||||||
|
// IsManagedByPartitionAllocRegularPool use the bitmaps to judge whether a given
|
||||||
|
// address is in a pool that supports BackupRefPtr or in a pool that doesn't.
|
||||||
|
// All PartitionAlloc allocations must be in either of the pools.
|
||||||
|
// (64-bit) Hands out address space from up to kNumPools reserved, contiguous
// regions ("pools"). Reserve() carves ranges out of the pool identified by a
// pool_handle; UnreserveAndDecommit() returns them.
//
// (32-bit) Wraps AllocPages()/FreePages() and records which addresses belong
// to which pool in process-wide bitmaps, so that
// IsManagedByPartitionAllocBRPPool / IsManagedByPartitionAllocRegularPool can
// classify arbitrary addresses. All PartitionAlloc allocations must be in
// either of the pools.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
 public:
  // Process-wide singleton accessor.
  static AddressPoolManager& GetInstance();

  AddressPoolManager(const AddressPoolManager&) = delete;
  AddressPoolManager& operator=(const AddressPoolManager&) = delete;

#if defined(PA_HAS_64_BITS_POINTERS)
  // Registers [address, address + length) as a new pool; returns its handle.
  pool_handle Add(uintptr_t address, size_t length);
  // Unregisters the pool behind |handle|.
  void Remove(pool_handle handle);

  // Populate a |used| bitset of superpages currently in use.
  void GetPoolUsedSuperPages(pool_handle handle,
                             std::bitset<kMaxSuperPagesInPool>& used);

  // Return the base address of a pool.
  uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif

  // Reserves address space from GigaCage.
  uintptr_t Reserve(pool_handle handle,
                    uintptr_t requested_address,
                    size_t length);

  // Frees address space back to GigaCage and decommits underlying system
  // pages.
  void UnreserveAndDecommit(pool_handle handle,
                            uintptr_t address,
                            size_t length);
  void ResetForTesting();

#if !defined(PA_HAS_64_BITS_POINTERS)
  void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);

  static bool IsManagedByRegularPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
  }

  static bool IsManagedByBRPPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
  }
#endif  // !defined(PA_HAS_64_BITS_POINTERS)

  void DumpStats(AddressSpaceStatsDumper* dumper);

 private:
  friend class AddressPoolManagerForTesting;

  constexpr AddressPoolManager() = default;
  ~AddressPoolManager() = default;

  // Populates `stats` if applicable.
  // Returns whether `stats` was populated. (They might not be, e.g.
  // if PartitionAlloc is wholly unused in this process.)
  bool GetStats(AddressSpaceStats* stats);

#if defined(PA_HAS_64_BITS_POINTERS)
  // Bookkeeping for one contiguous reserved region, tracked at super-page
  // granularity via a bitset.
  class Pool {
   public:
    constexpr Pool() = default;
    ~Pool() = default;

    Pool(const Pool&) = delete;
    Pool& operator=(const Pool&) = delete;

    void Initialize(uintptr_t ptr, size_t length);
    bool IsInitialized();
    void Reset();

    uintptr_t FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

    bool TryReserveChunk(uintptr_t address, size_t size);

    void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
    uintptr_t GetBaseAddress();

    void GetStats(PoolStats* stats);

   private:
    Lock lock_;

    // Allocation state of the pool, 1 bit per super page:
    // 1 = allocated, 0 = free.
    std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);

    // Best-effort hint: every bit before this index is known to be 1, so
    // searches may safely start here. Bits at or after it can still be 1.
    size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;

    size_t total_bits_ = 0;
    uintptr_t address_begin_ = 0;
#if BUILDFLAG(PA_DCHECK_IS_ON)
    uintptr_t address_end_ = 0;
#endif
  };

  PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
    PA_DCHECK(0 < handle && handle <= kNumPools);
    return &pools_[handle - 1];
  }

  // Gets the stats for the pool identified by `handle`, if
  // initialized.
  void GetPoolStats(pool_handle handle, PoolStats* stats);

  Pool pools_[kNumPools];

#endif  // defined(PA_HAS_64_BITS_POINTERS)

  static AddressPoolManager singleton_;
};
|
||||||
|
|
||||||
|
// Handle of the pool backing regular (non-BRP) allocations.
PA_ALWAYS_INLINE pool_handle GetRegularPool() {
  return kRegularPoolHandle;
}
|
||||||
|
|
||||||
|
// Handle of the pool reserved for BackupRefPtr-supporting allocations.
PA_ALWAYS_INLINE pool_handle GetBRPPool() {
  return kBRPPoolHandle;
}
|
||||||
|
|
||||||
|
// Handle of the caller-provided configurable pool; only valid once that pool
// is available.
PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
  PA_DCHECK(IsConfigurablePoolAvailable());
  return kConfigurablePoolHandle;
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
|
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
|
||||||
|
#if !defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Guards both pool bitmaps; exposed via AddressPoolManagerBitmap::GetLock().
Lock g_lock;
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Returns the lock that guards the pool bitmaps defined below.
Lock& AddressPoolManagerBitmap::GetLock() {
  return g_lock;
}
|
||||||
|
|
||||||
|
std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
|
||||||
|
AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock())
|
||||||
|
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
|
||||||
|
AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock())
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
std::array<std::atomic_bool,
|
||||||
|
AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
|
||||||
|
AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
|
||||||
|
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // !defined(PA_HAS_64_BITS_POINTERS)
|
@ -0,0 +1,190 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
#include <atomic>
|
||||||
|
#include <bitset>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if !defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
|
||||||
|
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
|
||||||
|
// support it. All PartitionAlloc allocations must be in either of the pools.
|
||||||
|
//
|
||||||
|
// This code is specific to 32-bit systems.
|
||||||
|
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
|
||||||
|
public:
|
||||||
|
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
|
||||||
|
static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
|
||||||
|
|
||||||
|
// For BRP pool, we use partition page granularity to eliminate the guard
|
||||||
|
// pages from the bitmap at the ends:
|
||||||
|
// - Eliminating the guard page at the beginning is needed so that pointers
|
||||||
|
// to the end of an allocation that immediately precede a super page in BRP
|
||||||
|
// pool don't accidentally fall into that pool.
|
||||||
|
// - Eliminating the guard page at the end is to ensure that the last page
|
||||||
|
// of the address space isn't in the BRP pool. This allows using sentinels
|
||||||
|
// like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
|
||||||
|
// on an invalid address. (Note, 64-bit systems don't have this problem as
|
||||||
|
// the upper half of the address space always belongs to the OS.)
|
||||||
|
//
|
||||||
|
// Note, direct map allocations also belong to this pool. The same logic as
|
||||||
|
// above applies. It is important to note, however, that the granularity used
|
||||||
|
// here has to be a minimum of partition page size and direct map allocation
|
||||||
|
// granularity. Since DirectMapAllocationGranularity() is no smaller than
|
||||||
|
// PageAllocationGranularity(), we don't need to decrease the bitmap
|
||||||
|
// granularity any further.
|
||||||
|
static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
|
||||||
|
static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
|
||||||
|
static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
|
||||||
|
"");
|
||||||
|
static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
|
||||||
|
static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
|
||||||
|
static constexpr size_t kBRPPoolBits =
|
||||||
|
kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;
|
||||||
|
|
||||||
|
// Regular pool may include both normal bucket and direct map allocations, so
|
||||||
|
// the bitmap granularity has to be at least as small as
|
||||||
|
// DirectMapAllocationGranularity(). No need to eliminate guard pages at the
|
||||||
|
// ends, as this is a BackupRefPtr-specific concern, hence no need to lower
|
||||||
|
// the granularity to partition page size.
|
||||||
|
static constexpr size_t kBitShiftOfRegularPoolBitmap =
|
||||||
|
DirectMapAllocationGranularityShift();
|
||||||
|
static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
|
||||||
|
DirectMapAllocationGranularity();
|
||||||
|
static_assert(kBytesPer1BitOfRegularPoolBitmap ==
|
||||||
|
1 << kBitShiftOfRegularPoolBitmap,
|
||||||
|
"");
|
||||||
|
static constexpr size_t kRegularPoolBits =
|
||||||
|
kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
static bool IsManagedByRegularPool(uintptr_t address) {
|
||||||
|
static_assert(
|
||||||
|
std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
|
||||||
|
regular_pool_bits_.size(),
|
||||||
|
"The bitmap is too small, will result in unchecked out of bounds "
|
||||||
|
"accesses.");
|
||||||
|
// It is safe to read |regular_pool_bits_| without a lock since the caller
|
||||||
|
// is responsible for guaranteeing that the address is inside a valid
|
||||||
|
// allocation and the deallocation call won't race with this call.
|
||||||
|
return PA_TS_UNCHECKED_READ(
|
||||||
|
regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
static bool IsManagedByBRPPool(uintptr_t address) {
|
||||||
|
static_assert(std::numeric_limits<uintptr_t>::max() >>
|
||||||
|
kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
|
||||||
|
"The bitmap is too small, will result in unchecked out of "
|
||||||
|
"bounds accesses.");
|
||||||
|
// It is safe to read |brp_pool_bits_| without a lock since the caller
|
||||||
|
// is responsible for guaranteeing that the address is inside a valid
|
||||||
|
// allocation and the deallocation call won't race with this call.
|
||||||
|
return PA_TS_UNCHECKED_READ(
|
||||||
|
brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
static void BanSuperPageFromBRPPool(uintptr_t address) {
|
||||||
|
brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
|
||||||
|
true, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
|
||||||
|
// The only potentially dangerous scenario, in which this check is used, is
|
||||||
|
// when the assignment of the first raw_ptr<T> object for a non-GigaCage
|
||||||
|
// address is racing with the allocation of a new GigCage super-page at the
|
||||||
|
// same address. We assume that if raw_ptr<T> is being initialized with a
|
||||||
|
// raw pointer, the associated allocation is "alive"; otherwise, the issue
|
||||||
|
// should be fixed by rewriting the raw pointer variable as raw_ptr<T>.
|
||||||
|
// In the worst case, when such a fix is impossible, we should just undo the
|
||||||
|
// raw pointer -> raw_ptr<T> rewrite of the problematic field. If the
|
||||||
|
// above assumption holds, the existing allocation will prevent us from
|
||||||
|
// reserving the super-page region and, thus, having the race condition.
|
||||||
|
// Since we rely on that external synchronization, the relaxed memory
|
||||||
|
// ordering should be sufficient.
|
||||||
|
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
|
||||||
|
std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
|
||||||
|
private:
|
||||||
|
friend class AddressPoolManager;
|
||||||
|
|
||||||
|
static Lock& GetLock();
|
||||||
|
|
||||||
|
static std::bitset<kRegularPoolBits> regular_pool_bits_
|
||||||
|
PA_GUARDED_BY(GetLock());
|
||||||
|
static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
|
||||||
|
brp_forbidden_super_page_map_;
|
||||||
|
static std::atomic_size_t blocklist_hit_count_;
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
|
||||||
|
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
|
||||||
|
// No need to add IsManagedByConfigurablePool, because Configurable Pool
|
||||||
|
// doesn't exist on 32-bit.
|
||||||
|
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
|
||||||
|
#endif
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
|| internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
|
||||||
|
return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns false for nullptr.
|
||||||
|
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
|
||||||
|
uintptr_t address) {
|
||||||
|
// The Configurable Pool is only available on 64-bit builds.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
|
||||||
|
// The Configurable Pool is only available on 64-bit builds.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // !defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
|
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
using pool_handle = unsigned;
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
|
@ -0,0 +1,66 @@
|
|||||||
|
// Copyright 2014 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_space_randomization.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/random.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include <windows.h> // Must be in front of other Windows header files.
|
||||||
|
|
||||||
|
#include <versionhelpers.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
uintptr_t GetRandomPageBase() {
|
||||||
|
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
|
||||||
|
|
||||||
|
#if defined(ARCH_CPU_64_BITS)
|
||||||
|
random <<= 32ULL;
|
||||||
|
random |= static_cast<uintptr_t>(internal::RandomValue());
|
||||||
|
|
||||||
|
// The ASLRMask() and ASLROffset() constants will be suitable for the
|
||||||
|
// OS and build configuration.
|
||||||
|
#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
|
||||||
|
// Windows >= 8.1 has the full 47 bits. Use them where available.
|
||||||
|
static bool windows_81 = false;
|
||||||
|
static bool windows_81_initialized = false;
|
||||||
|
if (!windows_81_initialized) {
|
||||||
|
windows_81 = IsWindows8Point1OrGreater();
|
||||||
|
windows_81_initialized = true;
|
||||||
|
}
|
||||||
|
if (!windows_81) {
|
||||||
|
random &= internal::ASLRMaskBefore8_10();
|
||||||
|
} else {
|
||||||
|
random &= internal::ASLRMask();
|
||||||
|
}
|
||||||
|
random += internal::ASLROffset();
|
||||||
|
#else
|
||||||
|
random &= internal::ASLRMask();
|
||||||
|
random += internal::ASLROffset();
|
||||||
|
#endif // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
|
||||||
|
#else // defined(ARCH_CPU_32_BITS)
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
// On win32 host systems the randomization plus huge alignment causes
|
||||||
|
// excessive fragmentation. Plus most of these systems lack ASLR, so the
|
||||||
|
// randomization isn't buying anything. In that case we just skip it.
|
||||||
|
// TODO(palmer): Just dump the randomization when HE-ASLR is present.
|
||||||
|
static BOOL is_wow64 = -1;
|
||||||
|
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
|
||||||
|
is_wow64 = FALSE;
|
||||||
|
if (!is_wow64)
|
||||||
|
return 0;
|
||||||
|
#endif // BUILDFLAG(IS_WIN)
|
||||||
|
random &= internal::ASLRMask();
|
||||||
|
random += internal::ASLROffset();
|
||||||
|
#endif // defined(ARCH_CPU_32_BITS)
|
||||||
|
|
||||||
|
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
return random;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
@ -0,0 +1,290 @@
|
|||||||
|
// Copyright 2014 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// Calculates a random preferred mapping address. In calculating an address, we
|
||||||
|
// balance good ASLR against not fragmenting the address space too badly.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
AslrAddress(uintptr_t mask) {
|
||||||
|
return mask & PageAllocationGranularityBaseMask();
|
||||||
|
}
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
AslrMask(uintptr_t bits) {
|
||||||
|
return AslrAddress((1ULL << bits) - 1ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Turn off formatting, because the thicket of nested ifdefs below is
|
||||||
|
// incomprehensible without indentation. It is also incomprehensible with
|
||||||
|
// indentation, but the only other option is a combinatorial explosion of
|
||||||
|
// *_{win,linux,mac,foo}_{32,64}.h files.
|
||||||
|
//
|
||||||
|
// clang-format off
|
||||||
|
|
||||||
|
#if defined(ARCH_CPU_64_BITS)
|
||||||
|
|
||||||
|
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
|
||||||
|
|
||||||
|
// We shouldn't allocate system pages at all for sanitizer builds. However,
|
||||||
|
// we do, and if random hint addresses interfere with address ranges
|
||||||
|
// hard-coded in those tools, bad things happen. This address range is
|
||||||
|
// copied from TSAN source but works with all tools. See
|
||||||
|
// https://crbug.com/539863.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLRMask() {
|
||||||
|
return AslrAddress(0x007fffffffffULL);
|
||||||
|
}
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLROffset() {
|
||||||
|
return AslrAddress(0x7e8000000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_WIN)
|
||||||
|
|
||||||
|
// Windows 8.10 and newer support the full 48 bit address range. Older
|
||||||
|
// versions of Windows only support 44 bits. Since ASLROffset() is non-zero
|
||||||
|
// and may cause a carry, use 47 and 43 bit masks. See
|
||||||
|
// http://www.alex-ionescu.com/?p=246
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(47);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
|
||||||
|
return AslrMask(43);
|
||||||
|
}
|
||||||
|
// Try not to map pages into the range where Windows loads DLLs by default.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return 0x80000000ULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
|
||||||
|
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
|
||||||
|
// is destroyed. Using a virtual address space that is too large causes a
|
||||||
|
// leak of about 1 wired [can never be paged out] page per call to mmap. The
|
||||||
|
// page is only reclaimed when the process is killed. Confine the hint to a
|
||||||
|
// 39-bit section of the virtual address space.
|
||||||
|
//
|
||||||
|
// This implementation adapted from
|
||||||
|
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
|
||||||
|
// is that here we clamp to 39 bits, not 32.
|
||||||
|
//
|
||||||
|
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
|
||||||
|
// changes.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLRMask() {
|
||||||
|
return AslrMask(38);
|
||||||
|
}
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLROffset() {
|
||||||
|
// Be careful, there is a zone where macOS will not map memory, at least
|
||||||
|
// on ARM64. From an ARM64 machine running 12.3, the range seems to be
|
||||||
|
// [0x1000000000, 0x7000000000). Make sure that the range we use is
|
||||||
|
// outside these bounds. In 12.3, there is a reserved area between
|
||||||
|
// MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
|
||||||
|
// which is reserved on ARM64. See these constants in XNU's source code
|
||||||
|
// for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
|
||||||
|
return AslrAddress(0x10000000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
|
||||||
|
|
||||||
|
#if defined(ARCH_CPU_X86_64)
|
||||||
|
|
||||||
|
// Linux (and macOS) support the full 47-bit user space of x64 processors.
|
||||||
|
// Use only 46 to allow the kernel a chance to fulfill the request.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(46);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_ARM64)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_ANDROID)
|
||||||
|
|
||||||
|
// Restrict the address range on Android to avoid a large performance
|
||||||
|
// regression in single-process WebViews. See https://crbug.com/837640.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(30);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x20000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_LINUX)
|
||||||
|
|
||||||
|
// Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
|
||||||
|
// page size and number of levels of translation pages used. We use
|
||||||
|
// 39-bit as base as all setups should support this, lowered to 38-bit
|
||||||
|
// as ASLROffset() could cause a carry.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLRMask() {
|
||||||
|
return AslrMask(38);
|
||||||
|
}
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
ASLROffset() {
|
||||||
|
return AslrAddress(0x1000000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
|
||||||
|
// could cause a carry.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(38);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x1000000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_PPC64)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_AIX)
|
||||||
|
|
||||||
|
// AIX has 64 bits of virtual addressing, but we limit the address range
|
||||||
|
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
|
||||||
|
// extra address space to isolate the mmap regions.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(30);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x400000000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_BIG_ENDIAN)
|
||||||
|
|
||||||
|
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(42);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
|
||||||
|
|
||||||
|
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(46);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_S390X)
|
||||||
|
|
||||||
|
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
|
||||||
|
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
|
||||||
|
// chance to fulfill the request.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(40);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_S390)
|
||||||
|
|
||||||
|
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
|
||||||
|
// a chance to fulfill the request.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(29);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
|
||||||
|
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
|
||||||
|
|
||||||
|
// For all other POSIX variants, use 30 bits.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(30);
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_SOLARIS)
|
||||||
|
|
||||||
|
// For our Solaris/illumos mmap hint, we pick a random address in the
|
||||||
|
// bottom half of the top half of the address space (that is, the third
|
||||||
|
// quarter). Because we do not MAP_FIXED, this will be treated only as a
|
||||||
|
// hint -- the system will not fail to mmap because something else
|
||||||
|
// happens to already be mapped at our random address. We deliberately
|
||||||
|
// set the hint high enough to get well above the system's break (that
|
||||||
|
// is, the heap); Solaris and illumos will try the hint and if that
|
||||||
|
// fails allocate as if there were no hint at all. The high hint
|
||||||
|
// prevents the break from getting hemmed in at low values, ceding half
|
||||||
|
// of the address space to the system heap.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x80000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_AIX)
|
||||||
|
|
||||||
|
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
|
||||||
|
// upper range.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x90000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
|
||||||
|
|
||||||
|
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
|
||||||
|
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
|
||||||
|
// 10.6 and 10.7.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x20000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)
|
||||||
|
|
||||||
|
#endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
|
||||||
|
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
|
||||||
|
|
||||||
|
#endif // BUILDFLAG(IS_POSIX)
|
||||||
|
|
||||||
|
#elif defined(ARCH_CPU_32_BITS)
|
||||||
|
|
||||||
|
// This is a good range on 32-bit Windows and Android (the only platforms on
|
||||||
|
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
|
||||||
|
// is no issue with carries here.
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||||
|
return AslrMask(30);
|
||||||
|
}
|
||||||
|
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||||
|
return AslrAddress(0x20000000ULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#error Please tell us about your exotic hardware! Sounds interesting.
|
||||||
|
|
||||||
|
#endif // defined(ARCH_CPU_32_BITS)
|
||||||
|
|
||||||
|
// clang-format on
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
|
52
src/base/allocator/partition_allocator/address_space_stats.h
Normal file
52
src/base/allocator/partition_allocator/address_space_stats.h
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
// Copyright 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// All members are measured in super pages.
|
||||||
|
struct PoolStats {
|
||||||
|
size_t usage = 0;
|
||||||
|
|
||||||
|
// On 32-bit, GigaCage is mainly a logical entity, intermingled with
|
||||||
|
// allocations not managed by PartitionAlloc. The "largest available
|
||||||
|
// reservation" is not possible to measure in that case.
|
||||||
|
#if defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
size_t largest_available_reservation = 0;
|
||||||
|
#endif // defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
};
|
||||||
|
|
||||||
|
struct AddressSpaceStats {
|
||||||
|
PoolStats regular_pool_stats;
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
PoolStats brp_pool_stats;
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
#if defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
PoolStats configurable_pool_stats;
|
||||||
|
#else
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
size_t blocklist_size; // measured in super pages
|
||||||
|
size_t blocklist_hit_count;
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
#endif // defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Interface passed to `AddressPoolManager::DumpStats()` to mediate
|
||||||
|
// for `AddressSpaceDumpProvider`.
|
||||||
|
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
|
||||||
|
public:
|
||||||
|
virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
|
41
src/base/allocator/partition_allocator/allocation_guard.cc
Normal file
41
src/base/allocator/partition_allocator/allocation_guard.cc
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/allocation_guard.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
|
||||||
|
#if defined(PA_HAS_ALLOCATION_GUARD)
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
thread_local bool g_disallow_allocations;
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
ScopedDisallowAllocations::ScopedDisallowAllocations() {
|
||||||
|
if (g_disallow_allocations)
|
||||||
|
PA_IMMEDIATE_CRASH();
|
||||||
|
|
||||||
|
g_disallow_allocations = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
ScopedDisallowAllocations::~ScopedDisallowAllocations() {
|
||||||
|
g_disallow_allocations = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
ScopedAllowAllocations::ScopedAllowAllocations() {
|
||||||
|
// Save the previous value, as ScopedAllowAllocations is used in all
|
||||||
|
// partitions, not just the malloc() ones(s).
|
||||||
|
saved_value_ = g_disallow_allocations;
|
||||||
|
g_disallow_allocations = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
ScopedAllowAllocations::~ScopedAllowAllocations() {
|
||||||
|
g_disallow_allocations = saved_value_;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // defined(PA_HAS_ALLOCATION_GUARD)
|
49
src/base/allocator/partition_allocator/allocation_guard.h
Normal file
49
src/base/allocator/partition_allocator/allocation_guard.h
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
#if defined(PA_HAS_ALLOCATION_GUARD)
|
||||||
|
|
||||||
|
// Disallow allocations in the scope. Does not nest.
|
||||||
|
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
|
||||||
|
public:
|
||||||
|
ScopedDisallowAllocations();
|
||||||
|
~ScopedDisallowAllocations();
|
||||||
|
};
|
||||||
|
|
||||||
|
// Disallow allocations in the scope. Does not nest.
|
||||||
|
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
|
||||||
|
public:
|
||||||
|
ScopedAllowAllocations();
|
||||||
|
~ScopedAllowAllocations();
|
||||||
|
|
||||||
|
private:
|
||||||
|
bool saved_value_;
|
||||||
|
};
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
struct [[maybe_unused]] ScopedDisallowAllocations{};
|
||||||
|
struct [[maybe_unused]] ScopedAllowAllocations{};
|
||||||
|
|
||||||
|
#endif // defined(PA_HAS_ALLOCATION_GUARD)
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
namespace base::internal {
|
||||||
|
|
||||||
|
using ::partition_alloc::ScopedAllowAllocations;
|
||||||
|
using ::partition_alloc::ScopedDisallowAllocations;
|
||||||
|
|
||||||
|
} // namespace base::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
|
@ -0,0 +1,50 @@
|
|||||||
|
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style license that can be
|
||||||
|
# found in the LICENSE file.
|
||||||
|
|
||||||
|
# This file contains a test function for checking Arm's branch target
|
||||||
|
# identification (BTI) feature, which helps mitigate jump-oriented
|
||||||
|
# programming. To get it working, BTI instructions must be executed
|
||||||
|
# on a compatible core, and the executable pages must be mapped with
|
||||||
|
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
|
||||||
|
# correctly:
|
||||||
|
# 1) Allocate a read-write page.
|
||||||
|
# 2) Copy between the start and end symbols into that page.
|
||||||
|
# 3) Set the page to read-execute with PROT_BTI.
|
||||||
|
# 4) Call the first offset of the page, verify the result.
|
||||||
|
# 5) Call the second offset of the page (skipping the landing pad).
|
||||||
|
# Verify that it crashes as expected.
|
||||||
|
# This test works irrespective of whether BTI is enabled for C/C++
|
||||||
|
# objects via -mbranch-protection=standard.
|
||||||
|
|
||||||
|
.text
|
||||||
|
.global arm_bti_test_function
|
||||||
|
.global arm_bti_test_function_invalid_offset
|
||||||
|
.global arm_bti_test_function_end
|
||||||
|
arm_bti_test_function:
|
||||||
|
# Mark the start of this function as a valid call target.
|
||||||
|
bti jc
|
||||||
|
add x0, x0, #1
|
||||||
|
arm_bti_test_function_invalid_offset:
|
||||||
|
# This label simulates calling an incomplete function.
|
||||||
|
# Jumping here should crash systems which support BTI.
|
||||||
|
add x0, x0, #2
|
||||||
|
ret
|
||||||
|
arm_bti_test_function_end:
|
||||||
|
nop
|
||||||
|
|
||||||
|
// For details see section "6.2 Program Property" in
|
||||||
|
// "ELF for the Arm 64-bit Architecture (AArch64)"
|
||||||
|
// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property
|
||||||
|
.pushsection .note.gnu.property, "a";
|
||||||
|
.balign 8;
|
||||||
|
.long 4;
|
||||||
|
.long 0x10;
|
||||||
|
.long 0x5;
|
||||||
|
.asciz "GNU";
|
||||||
|
.long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
|
||||||
|
.long 4;
|
||||||
|
.long 1; /* GNU_PROPERTY_AARCH64_BTI */;
|
||||||
|
.long 0;
|
||||||
|
.popsection
|
||||||
|
|
@ -0,0 +1,31 @@
|
|||||||
|
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
|
||||||
|
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if defined(ARCH_CPU_ARM64)
|
||||||
|
extern "C" {
|
||||||
|
/**
|
||||||
|
* A valid BTI function. Jumping to this funtion should not cause any problem in
|
||||||
|
* a BTI enabled environment.
|
||||||
|
**/
|
||||||
|
int64_t arm_bti_test_function(int64_t);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A function without proper BTI landing pad. Jumping here should crash the
|
||||||
|
* program on systems which support BTI.
|
||||||
|
**/
|
||||||
|
int64_t arm_bti_test_function_invalid_offset(int64_t);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A simple function which immediately returns to sender.
|
||||||
|
**/
|
||||||
|
void arm_bti_test_function_end(void);
|
||||||
|
}
|
||||||
|
#endif // defined(ARCH_CPU_ARM64)
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
|
123
src/base/allocator/partition_allocator/build_config.md
Normal file
123
src/base/allocator/partition_allocator/build_config.md
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
# Build Config
|
||||||
|
|
||||||
|
PartitionAlloc's behavior and operation can be influenced by many
|
||||||
|
different settings. Broadly, these are controlled at the top-level by
|
||||||
|
[GN args][gn-declare-args], which propagate via
|
||||||
|
[buildflags][buildflag-header] and `#defined` clauses.
|
||||||
|
|
||||||
|
*** promo
|
||||||
|
Most of what you'll want to know exists between
|
||||||
|
|
||||||
|
* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
|
||||||
|
* [`allocator.gni`][allocator-gni],
|
||||||
|
* [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
|
||||||
|
* [`//base/BUILD.gn`][base-build-gn].
|
||||||
|
***
|
||||||
|
|
||||||
|
*** aside
|
||||||
|
While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of
|
||||||
|
PartitionAlloc's behavior is governed by compound conditions `#defined`
|
||||||
|
in [`partition_alloc_config.h`][partition-alloc-config].
|
||||||
|
***
|
||||||
|
|
||||||
|
## Select GN Args
|
||||||
|
|
||||||
|
### `use_partition_alloc`
|
||||||
|
|
||||||
|
Defines whether PartitionAlloc is at all available.
|
||||||
|
|
||||||
|
Setting this `false` will entirely remove PartitionAlloc from the
|
||||||
|
Chromium build. _You probably do not want this._
|
||||||
|
|
||||||
|
*** note
|
||||||
|
Back when PartitionAlloc was the dedicated allocator in Blink, disabling
|
||||||
|
it was logically identical to wholly disabling it in Chromium. This GN
|
||||||
|
arg organically grew in scope with the advent of
|
||||||
|
PartitionAlloc-Everywhere and must be `true` as a prerequisite for
|
||||||
|
enabling PA-E.
|
||||||
|
***
|
||||||
|
|
||||||
|
### `use_allocator`
|
||||||
|
|
||||||
|
Does nothing special when value is `"none"`. Enables
|
||||||
|
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is
|
||||||
|
`"partition"`.
|
||||||
|
|
||||||
|
*** note
|
||||||
|
* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
|
||||||
|
includes Blink where PartitionAlloc originated, setting
|
||||||
|
`use_allocator = "none"` does not disable PA usage in Blink.
|
||||||
|
* `use_allocator = "partition"` internally sets
|
||||||
|
`use_partition_alloc_as_malloc = true`, which must not be confused
|
||||||
|
with `use_partition_alloc` (see above).
|
||||||
|
***
|
||||||
|
|
||||||
|
### `use_backup_ref_ptr`
|
||||||
|
|
||||||
|
Specifies `BackupRefPtr` as the implementation for `base::raw_ptr<T>`
|
||||||
|
when `true`. See the [MiraclePtr documentation][miracleptr-doc].
|
||||||
|
|
||||||
|
*** aside
|
||||||
|
BRP requires support from PartitionAlloc, so `use_backup_ref_ptr` also
|
||||||
|
compiles the relevant code into PA. However, this arg does _not_ govern
|
||||||
|
whether or not BRP is actually enabled at runtime - that functionality
|
||||||
|
is controlled by a Finch flag.
|
||||||
|
***
|
||||||
|
|
||||||
|
## Note: Component Builds
|
||||||
|
|
||||||
|
When working on PartitionAlloc, know that `is_debug` defaults to
|
||||||
|
implying `is_component_build`, which interferes with the allocator
|
||||||
|
shim. A typical set of GN args should include
|
||||||
|
|
||||||
|
```none
|
||||||
|
is_debug = true
|
||||||
|
is_component_build = false
|
||||||
|
```
|
||||||
|
|
||||||
|
Conversely, build configurations that have `is_component_build = true`
|
||||||
|
without explicitly specifying PA-specific args will not build with PA-E
|
||||||
|
enabled.
|
||||||
|
|
||||||
|
## Notable Macros
|
||||||
|
|
||||||
|
There is an ongoing effort
|
||||||
|
[to break out PartitionAlloc into a standalone library][pa-ee-crbug].
|
||||||
|
Once PartitionAlloc stands alone from the larger Chrome build apparatus,
|
||||||
|
the code loses access to some macros. This is not an immediate concern,
|
||||||
|
but the team needs to decide either
|
||||||
|
|
||||||
|
* how to propagate these macros in place, or
|
||||||
|
* how to remove them, replacing them with PA-specific build config.
|
||||||
|
|
||||||
|
A non-exhaustive list of work items:
|
||||||
|
|
||||||
|
* `OFFICIAL_BUILD` - influences crash macros and
|
||||||
|
`PA_THREAD_CACHE_ALLOC_STATS`. These are conceptually distinct enough
|
||||||
|
to be worth separating into dedicated build controls.
|
||||||
|
* `IS_PARTITION_ALLOC_IMPL` - must be defined when PartitionAlloc is
|
||||||
|
built as a shared library. This is required to export symbols.
|
||||||
|
* `COMPONENT_BUILD` - component builds (as per
|
||||||
|
`//docs/component_build.md`) must `#define COMPONENT_BUILD`.
|
||||||
|
Additionally, to build Win32, invoker must `#define WIN32`.
|
||||||
|
* `MEMORY_TOOL_REPLACES_ALLOCATOR`
|
||||||
|
* `*_SANITIZER` - mainly influences unit tests.
|
||||||
|
|
||||||
|
TODO(crbug.com/1151236): don't `PA_COMPONENT_EXPORT()` functions defined
|
||||||
|
under `partition_alloc_base/`.
|
||||||
|
|
||||||
|
*** note
|
||||||
|
Over time, the above list should evolve into a list of macros / GN args
|
||||||
|
that influence PartitionAlloc's behavior.
|
||||||
|
***
|
||||||
|
|
||||||
|
[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
|
||||||
|
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
|
||||||
|
[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
|
||||||
|
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
|
||||||
|
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
|
||||||
|
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
|
||||||
|
[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h
|
||||||
|
[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
|
||||||
|
[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview
|
||||||
|
[pa-ee-crbug]: https://crbug.com/1151236
|
@ -0,0 +1,47 @@
|
|||||||
|
// Copyright 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
|
||||||
|
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
|
||||||
|
PA_DCHECK(g_dangling_raw_ptr_detected_fn);
|
||||||
|
return g_dangling_raw_ptr_detected_fn;
|
||||||
|
}
|
||||||
|
|
||||||
|
DanglingRawPtrDetectedFn* GetDanglingRawPtrReleasedFn() {
|
||||||
|
PA_DCHECK(g_dangling_raw_ptr_released_fn);
|
||||||
|
return g_dangling_raw_ptr_released_fn;
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
|
||||||
|
PA_DCHECK(fn);
|
||||||
|
g_dangling_raw_ptr_detected_fn = fn;
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
|
||||||
|
PA_DCHECK(fn);
|
||||||
|
g_dangling_raw_ptr_released_fn = fn;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
|
||||||
|
g_dangling_raw_ptr_detected_fn(id);
|
||||||
|
}
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
|
||||||
|
g_dangling_raw_ptr_released_fn(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
} // namespace partition_alloc
|
@ -0,0 +1,56 @@
|
|||||||
|
// Copyright 2022 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
|
||||||
|
// When compiled with build flags `enable_dangling_raw_ptr_checks`, dangling
|
||||||
|
// raw_ptr are reported. Its behavior can be configured here.
|
||||||
|
//
|
||||||
|
// Purpose of this level of indirection:
|
||||||
|
// - Ease testing.
|
||||||
|
// - Keep partition_alloc/ independent from base/. In most cases, when a
|
||||||
|
// dangling raw_ptr is detected/released, this involves recording a
|
||||||
|
// base::debug::StackTrace, which isn't desirable inside partition_alloc/.
|
||||||
|
// - Be able (potentially) to turn this feature on/off at runtime based on
|
||||||
|
// dependant's flags.
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
|
||||||
|
// memory region and the allocator is asked to release it.
|
||||||
|
//
|
||||||
|
// It won't be called again with the same `id`, up until (potentially) a call to
|
||||||
|
// DanglingRawPtrReleased(`id`) is made.
|
||||||
|
//
|
||||||
|
// This function is called from within the allocator, and is not allowed to
|
||||||
|
// allocate memory.
|
||||||
|
using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
|
||||||
|
|
||||||
|
// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
|
||||||
|
// last dangling raw_ptr stops referencing the memory region.
|
||||||
|
//
|
||||||
|
// This function is allowed to allocate memory.
|
||||||
|
using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
|
23
src/base/allocator/partition_allocator/dot/layers.dot
Normal file
23
src/base/allocator/partition_allocator/dot/layers.dot
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
digraph G {
|
||||||
|
graph[bgcolor=transparent]
|
||||||
|
node[shape=box,style="filled,rounded",color=deepskyblue]
|
||||||
|
|
||||||
|
subgraph cluster_tc {
|
||||||
|
label = "Thread Cache"
|
||||||
|
rankdir = LR
|
||||||
|
{rank=same;TLS1,TLS2,TLSn}
|
||||||
|
TLS1->TLS2[style=invisible,dir=none]
|
||||||
|
TLS2->TLSn[style=dotted,dir=none]
|
||||||
|
}
|
||||||
|
|
||||||
|
subgraph cluster_central {
|
||||||
|
label = "Central Allocator (per-partition lock)"
|
||||||
|
fast[label="slot span freelists (fast path)"]
|
||||||
|
slow[label="slot span management (slow path)"]
|
||||||
|
# Forces slow path node beneath fast path node.
|
||||||
|
fast->slow[style=invisible,dir=none]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Forces thread-external subgraph beneath thread cache subgraph.
|
||||||
|
TLS2->fast[style=invisible,dir=none]
|
||||||
|
}
|
BIN
src/base/allocator/partition_allocator/dot/layers.png
Normal file
BIN
src/base/allocator/partition_allocator/dot/layers.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 11 KiB |
95
src/base/allocator/partition_allocator/dot/super-page.dot
Normal file
95
src/base/allocator/partition_allocator/dot/super-page.dot
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
digraph G {
|
||||||
|
graph[bgcolor=transparent]
|
||||||
|
node[shape=plaintext]
|
||||||
|
edge[style=dashed]
|
||||||
|
|
||||||
|
invisible_a[label=<
|
||||||
|
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
|
||||||
|
<TR>
|
||||||
|
<TD PORT="red" WIDTH="100"></TD>
|
||||||
|
<TD PORT="green" WIDTH="20"></TD>
|
||||||
|
<TD PORT="blue" WIDTH="40"></TD>
|
||||||
|
<TD PORT="gold" WIDTH="300"></TD>
|
||||||
|
<TD PORT="pink" WIDTH="60"></TD>
|
||||||
|
</TR>
|
||||||
|
</TABLE>
|
||||||
|
>]
|
||||||
|
superpage[xlabel="Super Page",label=<
|
||||||
|
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
|
||||||
|
<TR>
|
||||||
|
<!-- Head Partition Page -->
|
||||||
|
<TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
|
||||||
|
<TD PORT="metadata"></TD>
|
||||||
|
<TD BGCOLOR="darkgrey" WIDTH="18"></TD>
|
||||||
|
<!-- Bitmaps -->
|
||||||
|
<TD WIDTH="100">Bitmaps(?)</TD>
|
||||||
|
<!-- Several Slot Spans -->
|
||||||
|
<TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
|
||||||
|
<TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
|
||||||
|
<TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
|
||||||
|
<TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
|
||||||
|
<TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
|
||||||
|
<TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
|
||||||
|
<TD WIDTH="79">...</TD>
|
||||||
|
<!-- Tail Partition Page -->
|
||||||
|
<TD BGCOLOR="darkgrey" WIDTH="39"></TD>
|
||||||
|
</TR>
|
||||||
|
</TABLE>
|
||||||
|
>]
|
||||||
|
invisible_b[label=<
|
||||||
|
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
|
||||||
|
<TR>
|
||||||
|
<TD PORT="green" WIDTH="30"></TD>
|
||||||
|
<TD PORT="blue" WIDTH="60"></TD>
|
||||||
|
<TD PORT="gold" WIDTH="180"></TD>
|
||||||
|
<TD PORT="red" WIDTH="90"></TD>
|
||||||
|
<TD PORT="pink" WIDTH="90"></TD>
|
||||||
|
</TR>
|
||||||
|
</TABLE>
|
||||||
|
>]
|
||||||
|
metadata_page[xlabel="Metadata",label=<
|
||||||
|
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
|
||||||
|
<TR>
|
||||||
|
<!-- Guard Page Metadata -->
|
||||||
|
<TD BGCOLOR="darkgrey"> </TD>
|
||||||
|
<!-- Bitmaps Offset -->
|
||||||
|
<TD> B? </TD>
|
||||||
|
<!-- Red Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="crimson">v</TD>
|
||||||
|
<TD BGCOLOR="crimson">+</TD>
|
||||||
|
<TD BGCOLOR="crimson">+</TD>
|
||||||
|
<!-- Green Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="palegreen">v</TD>
|
||||||
|
<!-- Blue Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="cornflowerblue">v</TD>
|
||||||
|
<TD BGCOLOR="cornflowerblue">+</TD>
|
||||||
|
<!-- Gold Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="gold">v</TD>
|
||||||
|
<TD BGCOLOR="gold">+</TD>
|
||||||
|
<TD BGCOLOR="gold">+</TD>
|
||||||
|
<TD BGCOLOR="gold">+</TD>
|
||||||
|
<TD BGCOLOR="gold">+</TD>
|
||||||
|
<TD BGCOLOR="gold">+</TD>
|
||||||
|
<!-- Red Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="crimson">v</TD>
|
||||||
|
<TD BGCOLOR="crimson">+</TD>
|
||||||
|
<TD BGCOLOR="crimson">+</TD>
|
||||||
|
<!-- Pink Slot Span Metadata -->
|
||||||
|
<TD BGCOLOR="deeppink">v</TD>
|
||||||
|
<!-- etc. -->
|
||||||
|
<TD WIDTH="64">...</TD>
|
||||||
|
<!-- Guard Page Metadata -->
|
||||||
|
<TD BGCOLOR="darkgrey"> </TD>
|
||||||
|
</TR>
|
||||||
|
</TABLE>
|
||||||
|
>]
|
||||||
|
|
||||||
|
invisible_a:red->superpage:red->superpage:red2[color=crimson]
|
||||||
|
superpage:red2->invisible_b:red[color=crimson]
|
||||||
|
invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
|
||||||
|
invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
|
||||||
|
invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
|
||||||
|
invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]
|
||||||
|
|
||||||
|
superpage:metadata->metadata_page[style="",arrowhead=odot]
|
||||||
|
}
|
BIN
src/base/allocator/partition_allocator/dot/super-page.png
Normal file
BIN
src/base/allocator/partition_allocator/dot/super-page.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 20 KiB |
87
src/base/allocator/partition_allocator/extended_api.cc
Normal file
87
src/base/allocator/partition_allocator/extended_api.cc
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/extended_api.h"
|
||||||
|
|
||||||
|
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/thread_cache.h"
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
#if defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
|
||||||
|
// Some platforms don't have a thread cache, or it could already have been
|
||||||
|
// disabled.
|
||||||
|
if (!root || !root->flags.with_thread_cache)
|
||||||
|
return;
|
||||||
|
|
||||||
|
ThreadCacheRegistry::Instance().PurgeAll();
|
||||||
|
root->flags.with_thread_cache = false;
|
||||||
|
// Doesn't destroy the thread cache object(s). For background threads, they
|
||||||
|
// will be collected (and free cached memory) at thread destruction
|
||||||
|
// time. For the main thread, we leak it.
|
||||||
|
}
|
||||||
|
|
||||||
|
void EnablePartitionAllocThreadCacheForRootIfDisabled(
|
||||||
|
ThreadSafePartitionRoot* root) {
|
||||||
|
if (!root)
|
||||||
|
return;
|
||||||
|
root->flags.with_thread_cache = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
void DisablePartitionAllocThreadCacheForProcess() {
|
||||||
|
auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
|
||||||
|
auto* aligned_allocator =
|
||||||
|
::base::internal::PartitionAllocMalloc::AlignedAllocator();
|
||||||
|
DisableThreadCacheForRootIfEnabled(regular_allocator);
|
||||||
|
if (aligned_allocator != regular_allocator)
|
||||||
|
DisableThreadCacheForRootIfEnabled(aligned_allocator);
|
||||||
|
DisableThreadCacheForRootIfEnabled(
|
||||||
|
::base::internal::PartitionAllocMalloc::OriginalAllocator());
|
||||||
|
}
|
||||||
|
#endif // defined(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
|
||||||
|
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
|
||||||
|
#if defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
DisablePartitionAllocThreadCacheForProcess();
|
||||||
|
#else
|
||||||
|
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
ThreadCache::SwapForTesting(root);
|
||||||
|
EnablePartitionAllocThreadCacheForRootIfDisabled(root);
|
||||||
|
|
||||||
|
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
}
|
||||||
|
|
||||||
|
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
|
||||||
|
#if defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
|
||||||
|
// First, disable the test thread cache we have.
|
||||||
|
DisableThreadCacheForRootIfEnabled(root);
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
|
||||||
|
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
|
||||||
|
|
||||||
|
ThreadCache::SwapForTesting(regular_allocator);
|
||||||
|
#else
|
||||||
|
ThreadCache::SwapForTesting(nullptr);
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
24
src/base/allocator/partition_allocator/extended_api.h
Normal file
24
src/base/allocator/partition_allocator/extended_api.h
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_root.h"
|
||||||
|
#include "base/allocator/partition_allocator/thread_cache.h"
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
// These two functions are unsafe to run if there are multiple threads running
|
||||||
|
// in the process.
|
||||||
|
//
|
||||||
|
// Disables the thread cache for the entire process, and replaces it with a
|
||||||
|
// thread cache for |root|.
|
||||||
|
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
|
||||||
|
// Disables the current thread cache, and replaces it with the default for the
|
||||||
|
// process.
|
||||||
|
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
|
157
src/base/allocator/partition_allocator/glossary.md
Normal file
157
src/base/allocator/partition_allocator/glossary.md
Normal file
@ -0,0 +1,157 @@
|
|||||||
|
# Glossary
|
||||||
|
|
||||||
|
This page describes some core terminology used in PartitionAlloc.
|
||||||
|
A weak attempt is made to present terms "in conceptual order" s.t.
|
||||||
|
each term depends mainly upon previously defined ones.
|
||||||
|
|
||||||
|
## Top-Level Terms
|
||||||
|
|
||||||
|
* **Partition**: A heap that is separated and protected both from other
|
||||||
|
partitions and from non-PartitionAlloc memory. Each partition holds
|
||||||
|
multiple buckets.
|
||||||
|
|
||||||
|
*** promo
|
||||||
|
**NOTE**: In code (and comments), "partition," "root," and even
|
||||||
|
"allocator" are all conceptually the same thing.
|
||||||
|
***
|
||||||
|
|
||||||
|
* **Bucket**: A collection of regions in a partition that contains
|
||||||
|
similar-sized objects. For example, one bucket may hold objects of
|
||||||
|
size (224, 256], another (256, 320], etc. Bucket size
|
||||||
|
brackets are geometrically spaced,
|
||||||
|
[going up to `kMaxBucketed`][max-bucket-comment].
|
||||||
|
* **Normal Bucket**: Any bucket whose size ceiling does not exceed
|
||||||
|
`kMaxBucketed`. This is the common case in PartitionAlloc, and
|
||||||
|
the "normal" modifier is often dropped in casual reference.
|
||||||
|
* **Direct Map (Bucket)**: Any allocation whose size exceeds `kMaxBucketed`.
|
||||||
|
|
||||||
|
Buckets consist of slot spans, organized as linked lists (see below).
|
||||||
|
|
||||||
|
## Pages
|
||||||
|
|
||||||
|
* **System Page**: A memory page defined by the CPU/OS. Commonly
|
||||||
|
referred to as a "virtual page" in other contexts. This is typically
|
||||||
|
4KiB, but it can be larger. PartitionAlloc supports up to 64KiB,
|
||||||
|
though this constant isn't always known at compile time (depending
|
||||||
|
on the OS).
|
||||||
|
* **Partition Page**: The most common granularity used by
|
||||||
|
PartitionAlloc. Consists of exactly 4 system pages.
|
||||||
|
* **Super Page**: A 2MiB region, aligned on a 2MiB boundary. Not to
|
||||||
|
be confused with OS-level terms like "large page" or "huge page",
|
||||||
|
which are also commonly 2MiB. These have to be fully committed /
|
||||||
|
uncommitted in memory, whereas super pages can be partially committed
|
||||||
|
with system page granularity.
|
||||||
|
* **Extent**: An extent is a run of consecutive super pages (belonging
|
||||||
|
to a single partition). Extents are to super pages what slot spans are
|
||||||
|
to slots (see below).
|
||||||
|
|
||||||
|
## Slots and Spans
|
||||||
|
|
||||||
|
* **Slot**: An indivisible allocation unit. Slot sizes are tied to
|
||||||
|
buckets. For example, each allocation that falls into the bucket
|
||||||
|
(224, 256] would be satisfied with a slot of size 256. This
|
||||||
|
applies only to normal buckets, not to direct map.
|
||||||
|
* **Slot Span**: A run of same-sized slots that are contiguous in
|
||||||
|
memory. Slot span size is a multiple of partition page size, but it
|
||||||
|
isn't always a multiple of slot size, although we try hard for this
|
||||||
|
to be the case.
|
||||||
|
* **Small Bucket**: Allocations up to 4 partition pages. In these
|
||||||
|
cases, slot spans are always between 1 and 4 partition pages in
|
||||||
|
size. For each slot span size, the slot span is chosen to minimize
|
||||||
|
number of pages used while keeping the rounding waste under a
|
||||||
|
reasonable limit.
|
||||||
|
* For example, for a slot size 96, 64B waste is deemed acceptable
|
||||||
|
when using a single partition page, but for slot size
|
||||||
|
384, the potential waste of 256B wouldn't be, so 3 partition pages
|
||||||
|
are used to achieve 0B waste.
|
||||||
|
* PartitionAlloc may avoid waste by lowering the number of committed
|
||||||
|
system pages compared to the number of reserved pages. For
|
||||||
|
example, for the slot size of 896B we'd use a slot span of 2
|
||||||
|
partition pages of 16KiB, i.e. 8 system pages of 4KiB, but commit
|
||||||
|
only up to 7, thus resulting in perfect packing.
|
||||||
|
* **Single-Slot Span**: Allocations above 4 partition pages (but
|
||||||
|
≤`kMaxBucketed`). This is because each slot span is guaranteed to
|
||||||
|
hold exactly one slot.
|
||||||
|
* Fun fact: there are sizes ≤4 partition pages that result in a
|
||||||
|
slot span having exactly 1 slot, but nonetheless they're still
|
||||||
|
classified as small buckets. The reason is that single-slot spans
|
||||||
|
are often handled by a different code path, and that distinction
|
||||||
|
is made purely based on slot size, for simplicity and efficiency.
|
||||||
|
|
||||||
|
## Other Terms
|
||||||
|
|
||||||
|
* **Object**: A chunk of memory returned to the allocating invoker
|
||||||
|
of the size requested. It doesn't have to span the entire slot,
|
||||||
|
nor does it have to begin at the slot start. This term is commonly
|
||||||
|
used as a parameter name in PartitionAlloc code, as opposed to
|
||||||
|
`slot_start`.
|
||||||
|
* **Thread Cache**: A [thread-local structure][pa-thread-cache] that
|
||||||
|
holds some not-too-large memory chunks, ready to be allocated. This
|
||||||
|
speeds up in-thread allocation by reducing a lock hold to a
|
||||||
|
thread-local storage lookup, improving cache locality.
|
||||||
|
* **GigaCage**: A memory region several gigabytes wide, reserved by
|
||||||
|
PartitionAlloc upon initialization, from which all allocations are
|
||||||
|
taken. The motivation for GigaCage is for code to be able to examine
|
||||||
|
a pointer and to immediately determine whether or not the memory was
|
||||||
|
allocated by PartitionAlloc. This provides support for a number of
|
||||||
|
features, including
|
||||||
|
[StarScan][starscan-readme] and
|
||||||
|
[BackupRefPtr][brp-doc].
|
||||||
|
* Note that GigaCage only exists in builds with 64-bit pointers.
|
||||||
|
* In builds with 32-bit pointers, PartitionAlloc tracks pointers
|
||||||
|
it dispenses with a bitmap. This is often referred to as "fake
|
||||||
|
GigaCage" (or simply "GigaCage") for lack of a better term.
|
||||||
|
* **Payload**: The usable area of a super page in which slot spans
|
||||||
|
reside. While generally this means "everything between the first
|
||||||
|
and last guard partition pages in a super page," the presence of
|
||||||
|
other metadata (e.g. StarScan bitmaps) can bump the starting offset
|
||||||
|
forward. While this term is entrenched in the code, the team
|
||||||
|
considers it suboptimal and is actively looking for a replacement.
|
||||||
|
* **Allocation Fast Path**: A path taken during an allocation that is
|
||||||
|
considered fast. Usually means that an allocation request can be
|
||||||
|
immediately satisfied by grabbing a slot from the freelist of the
|
||||||
|
first active slot span in the bucket.
|
||||||
|
* **Allocation Slow Path**: Anything which is not fast (see above).
|
||||||
|
Can involve
|
||||||
|
* finding another active slot span in the list,
|
||||||
|
* provisioning more slots in a slot span,
|
||||||
|
* bringing back a free (or decommitted) slot span,
|
||||||
|
* allocating a new slot span, or even
|
||||||
|
* allocating a new super page.
|
||||||
|
|
||||||
|
*** aside
|
||||||
|
By "slow" we may mean something as simple as extra logic (`if`
|
||||||
|
statements etc.), or something as costly as system calls.
|
||||||
|
***
|
||||||
|
|
||||||
|
## PartitionAlloc-Everywhere
|
||||||
|
|
||||||
|
Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
|
||||||
|
It was invoked explicitly, by calling PartitionAlloc APIs directly.
|
||||||
|
|
||||||
|
PartitionAlloc-Everywhere is the name of the project that brought PartitionAlloc
|
||||||
|
to the entire-ish codebase (exclusions apply). This was done by intercepting
|
||||||
|
`malloc()`, `free()`, `realloc()`, aforementioned `posix_memalign()`, etc. and
|
||||||
|
routing them into PartitionAlloc. The shim located in
|
||||||
|
`base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h` is
|
||||||
|
responsible for intercepting. For more details, see
|
||||||
|
[base/allocator/README.md](../../../base/allocator/README.md).
|
||||||
|
|
||||||
|
A special, catch-it-all *Malloc* partition has been created for the intercepted
|
||||||
|
`malloc()` et al. This is to isolate from already existing Blink partitions.
|
||||||
|
The only exception from that is Blink's *FastMalloc* partition, which was also
|
||||||
|
catch-it-all in nature, so it's perfectly fine to merge these together, to
|
||||||
|
minimize fragmentation.
|
||||||
|
|
||||||
|
As of 2022, PartitionAlloc-Everywhere is supported on
|
||||||
|
|
||||||
|
* Windows 32- and 64-bit
|
||||||
|
* Linux
|
||||||
|
* Android 32- and 64-bit
|
||||||
|
* macOS
|
||||||
|
* Fuchsia
|
||||||
|
|
||||||
|
[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
|
||||||
|
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
|
||||||
|
[starscan-readme]: https://chromium.googlesource.com/chromium/src/+/main/base/allocator/partition_allocator/starscan/README.md
|
||||||
|
[brp-doc]: https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/preview
|
96
src/base/allocator/partition_allocator/memory_reclaimer.cc
Normal file
96
src/base/allocator/partition_allocator/memory_reclaimer.cc
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/memory_reclaimer.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||||
|
#include "base/allocator/partition_allocator/starscan/pcscan.h"
|
||||||
|
|
||||||
|
// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
|
||||||
|
// cause significant jank.
|
||||||
|
#define PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM 0
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// static
|
||||||
|
MemoryReclaimer* MemoryReclaimer::Instance() {
|
||||||
|
static internal::base::NoDestructor<MemoryReclaimer> instance;
|
||||||
|
return instance.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds |partition| to the set of roots purged by Reclaim().
void MemoryReclaimer::RegisterPartition(PartitionRoot<>* partition) {
  internal::ScopedGuard guard(lock_);
  PA_DCHECK(partition);
  // A partition may only be registered once; insertion must not find an
  // existing entry.
  auto inserted = partitions_.insert(partition);
  PA_DCHECK(inserted.second);
}
|
||||||
|
|
||||||
|
void MemoryReclaimer::UnregisterPartition(
|
||||||
|
PartitionRoot<internal::ThreadSafe>* partition) {
|
||||||
|
internal::ScopedGuard lock(lock_);
|
||||||
|
PA_DCHECK(partition);
|
||||||
|
size_t erased_count = partitions_.erase(partition);
|
||||||
|
PA_DCHECK(erased_count == 1u);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Out-of-line defaulted definitions. The constructor/destructor are private;
// construction happens only through the NoDestructor-backed singleton
// (NoDestructor is a friend — see the header).
MemoryReclaimer::MemoryReclaimer() = default;
MemoryReclaimer::~MemoryReclaimer() = default;
|
||||||
|
|
||||||
|
void MemoryReclaimer::ReclaimAll() {
|
||||||
|
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
|
||||||
|
PurgeFlags::kDiscardUnusedSystemPages |
|
||||||
|
PurgeFlags::kAggressiveReclaim;
|
||||||
|
Reclaim(kFlags);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MemoryReclaimer::ReclaimNormal() {
|
||||||
|
constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
|
||||||
|
PurgeFlags::kDiscardUnusedSystemPages;
|
||||||
|
Reclaim(kFlags);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Core reclaim routine: runs the (optional) *Scan pass, purges the current
// thread's cache when aggressive, then purges every registered partition.
void MemoryReclaimer::Reclaim(int flags) {
  internal::ScopedGuard lock(
      lock_);  // Has to protect from concurrent (Un)Register calls.

  // PCScan quarantines freed slots. Trigger the scan first to let it call
  // FreeNoHooksImmediate on slots that pass the quarantine.
  //
  // In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
  // so that the slots are actually freed. (This is done synchronously only for
  // the current thread.)
  //
  // Lastly, decommit empty slot spans and try to discard unused pages at the
  // end of the remaining active slots.
#if PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM
  {
    using PCScan = internal::PCScan;
    // Aggressive reclaim forces a scan even if PCScan deems it unnecessary.
    const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
                                     ? PCScan::InvocationMode::kForcedBlocking
                                     : PCScan::InvocationMode::kBlocking;
    PCScan::PerformScanIfNeeded(invocation_mode);
  }
#endif

#if defined(PA_THREAD_CACHE_SUPPORTED)
  // Don't completely empty the thread cache outside of low memory situations,
  // as there is periodic purge which makes sure that it doesn't take too much
  // space.
  if (flags & PurgeFlags::kAggressiveReclaim)
    ThreadCacheRegistry::Instance().PurgeAll();
#endif

  for (auto* partition : partitions_)
    partition->PurgeMemory(flags);
}
|
||||||
|
|
||||||
|
// Drops every registered partition so a test can start from a clean slate.
void MemoryReclaimer::ResetForTesting() {
  internal::ScopedGuard guard(lock_);
  partitions_.clear();
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
74
src/base/allocator/partition_allocator/memory_reclaimer.h
Normal file
74
src/base/allocator/partition_allocator/memory_reclaimer.h
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <set>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// Posts and handles memory reclaim tasks for PartitionAlloc.
|
||||||
|
//
|
||||||
|
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
|
||||||
|
// called from any thread, concurrently with reclaim. Reclaim itself runs in the
|
||||||
|
// context of the provided |SequencedTaskRunner|, meaning that the caller must
|
||||||
|
// take care of this runner being compatible with the various partitions.
|
||||||
|
//
|
||||||
|
// Singleton as this runs as long as the process is alive, and
|
||||||
|
// having multiple instances would be wasteful.
|
||||||
|
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
 public:
  // Returns the process-wide, never-destroyed instance.
  static MemoryReclaimer* Instance();

  MemoryReclaimer(const MemoryReclaimer&) = delete;
  MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;

  // Internal. Do not use.
  // Registers a partition to be tracked by the reclaimer.
  void RegisterPartition(PartitionRoot<>* partition);
  // Internal. Do not use.
  // Unregisters a partition to be tracked by the reclaimer.
  void UnregisterPartition(PartitionRoot<>* partition);

  // Triggers an explicit reclaim now to reclaim as much free memory as
  // possible. The API callers need to invoke this method periodically
  // if they want to use memory reclaimer.
  // See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
  void ReclaimNormal();

  // Returns a recommended interval to invoke ReclaimNormal.
  int64_t GetRecommendedReclaimIntervalInMicroseconds() {
    return internal::base::Seconds(4).InMicroseconds();
  }

  // Triggers an explicit reclaim now, reclaiming all free memory.
  void ReclaimAll();

 private:
  MemoryReclaimer();
  ~MemoryReclaimer();
  // |flags| is an OR of base::PartitionPurgeFlags.
  void Reclaim(int flags);
  // NOTE(review): the stale `void ReclaimAndReschedule();` declaration was
  // removed — it had no definition in memory_reclaimer.cc, so any call would
  // have been a link error.
  void ResetForTesting();

  // Protects |partitions_| from concurrent (un)registration and reclaim.
  internal::Lock lock_;
  std::set<PartitionRoot<>*> partitions_ PA_GUARDED_BY(lock_);

  friend class internal::base::NoDestructor<MemoryReclaimer>;
  friend class MemoryReclaimerTest;
};
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
|
81
src/base/allocator/partition_allocator/oom.cc
Normal file
81
src/base/allocator/partition_allocator/oom.cc
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
// Copyright 2021 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/oom.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/oom_callback.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include <windows.h>
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
#endif // BUILDFLAG(IS_WIN)
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
size_t g_oom_size = 0U;
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
// TODO(crbug.com/1151236): Update to
// partition_alloc::internal::base::internal::OnNoMemoryInternal
//
// Records |size| in g_oom_size and terminates the process. Never returns.
// Must not allocate: may be invoked from inside the allocator itself.
PA_NOINLINE void OnNoMemoryInternal(size_t size) {
  g_oom_size = size;
#if BUILDFLAG(IS_WIN)
  // Kill the process. This is important for security since most of code
  // does not check the result of memory allocation.
  // https://msdn.microsoft.com/en-us/library/het71c37.aspx
  // Pass the size of the failed request in an exception argument.
  ULONG_PTR exception_args[] = {size};
  ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
                   std::size(exception_args), exception_args);

  // Safety check, make sure process exits here.
  _exit(win::kOomExceptionCode);
#else
  // Copy |size| into a local and alias it so the value survives optimization
  // and is visible in crash minidumps.
  size_t tmp_size = size;
  internal::base::debug::Alias(&tmp_size);

  // Note: Don't add anything that may allocate here. Depending on the
  // allocator, this may be called from within the allocator (e.g. with
  // PartitionAlloc), and would deadlock as our locks are not recursive.
  //
  // Additionally, this is unlikely to work, since allocating from an OOM
  // handler is likely to fail.
  //
  // Use PA_IMMEDIATE_CRASH() so that the top frame in the crash is our code,
  // rather than using abort() or similar; this avoids the crash server needing
  // to be able to successfully unwind through libc to get to the correct
  // address, which is particularly an issue on Android.
  PA_IMMEDIATE_CRASH();
#endif  // BUILDFLAG(IS_WIN)
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
// Public entry point (declared in oom.h): records |size| and terminates the
// process in a way crash reporting classifies as OOM. Never returns.
void TerminateBecauseOutOfMemory(size_t size) {
  internal::OnNoMemoryInternal(size);
}
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
  // Give the embedder-registered callback (if any) a chance to run before
  // the process is terminated.
  RunPartitionAllocOomCallback();
  TerminateBecauseOutOfMemory(size);
  // Safety net: the call above should never return.
  PA_IMMEDIATE_CRASH();
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
70
src/base/allocator/partition_allocator/oom.h
Normal file
70
src/base/allocator/partition_allocator/oom.h
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/allocation_guard.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
// Terminates process. Should be called only for out of memory errors.
|
||||||
|
// |size| is the size of the failed allocation, or 0 if not known.
|
||||||
|
// Crash reporting classifies such crashes as OOM.
|
||||||
|
// Must be allocation-safe.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void TerminateBecauseOutOfMemory(size_t size);
|
||||||
|
|
||||||
|
// Records the size of the allocation that caused the current OOM crash, for
|
||||||
|
// consumption by Breakpad.
|
||||||
|
// TODO: this can be removed when Breakpad is no longer supported.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
namespace win {
|
||||||
|
|
||||||
|
// Custom Windows exception code chosen to indicate an out of memory error.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
|
||||||
|
// "To make sure that you do not define a code that conflicts with an existing
|
||||||
|
// exception code" ... "The resulting error code should therefore have the
|
||||||
|
// highest four bits set to hexadecimal E."
|
||||||
|
// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
|
||||||
|
const DWORD kOomExceptionCode = 0xe0000008;
|
||||||
|
|
||||||
|
} // namespace win
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
// The crash is generated in a PA_NOINLINE function so that we can classify the
|
||||||
|
// crash as an OOM solely by analyzing the stack trace. It is tagged as
|
||||||
|
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
|
||||||
|
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
|
||||||
|
OnNoMemory(size_t size);
|
||||||
|
|
||||||
|
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
|
||||||
|
// exception on Windows to signal this is OOM and not a normal assert.
|
||||||
|
// OOM_CRASH(size) is called by users of PageAllocator (including
|
||||||
|
// PartitionAlloc) to signify an allocation failure from the platform.
|
||||||
|
#define OOM_CRASH(size) \
|
||||||
|
do { \
|
||||||
|
/* Raising an exception might allocate, allow that. */ \
|
||||||
|
::partition_alloc::ScopedAllowAllocations guard{}; \
|
||||||
|
::partition_alloc::internal::OnNoMemory(size); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
|
27
src/base/allocator/partition_allocator/oom_callback.cc
Normal file
27
src/base/allocator/partition_allocator/oom_callback.cc
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/oom_callback.h"
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
PartitionAllocOomCallback g_oom_callback;
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Stores the process-wide OOM callback invoked from OOM_CRASH().
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
  // Only one callback may ever be registered; re-registration is a bug.
  PA_DCHECK(!g_oom_callback);
  g_oom_callback = callback;
}
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
// Invokes the registered OOM callback, if any. Called on the crash path, so
// the callback must not allocate.
void RunPartitionAllocOomCallback() {
  if (g_oom_callback)
    g_oom_callback();
}
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
26
src/base/allocator/partition_allocator/oom_callback.h
Normal file
26
src/base/allocator/partition_allocator/oom_callback.h
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
using PartitionAllocOomCallback = void (*)();
|
||||||
|
|
||||||
|
// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
|
||||||
|
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
|
||||||
|
// allocation failure from the platform.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback);
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RunPartitionAllocOomCallback();
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
|
382
src/base/allocator/partition_allocator/page_allocator.cc
Normal file
382
src/base/allocator/partition_allocator/page_allocator.cc
Normal file
@ -0,0 +1,382 @@
|
|||||||
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_space_randomization.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internal.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include <windows.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
|
||||||
|
#elif BUILDFLAG(IS_POSIX)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
|
||||||
|
#elif BUILDFLAG(IS_FUCHSIA)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
|
||||||
|
#else
|
||||||
|
#error Platform not supported.
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
internal::Lock g_reserve_lock;
|
||||||
|
|
||||||
|
// We may reserve/release address space on different threads. This lock
// guards the s_reservation_address / s_reservation_size globals below.
internal::Lock& GetReserveLock() {
  return g_reserve_lock;
}
|
||||||
|
|
||||||
|
std::atomic<size_t> g_total_mapped_address_space;
|
||||||
|
|
||||||
|
// We only support a single block of reserved address space.
|
||||||
|
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
|
||||||
|
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
|
||||||
|
|
||||||
|
// Tries to allocate |length| bytes at hint |address|. If the first attempt
// fails and that failure implies the system truly cannot supply |length|
// bytes (the hint is merely advisory, or no hint was given), releases the
// process-wide reservation (if any) and retries once.
uintptr_t AllocPagesIncludingReserved(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag) {
  uintptr_t ret =
      internal::SystemAllocPages(address, length, accessibility, page_tag);
  if (!ret) {
    const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
    if (cant_alloc_length) {
      // The system cannot allocate |length| bytes. Release any reserved address
      // space and try once more.
      ReleaseReservation();
      ret =
          internal::SystemAllocPages(address, length, accessibility, page_tag);
    }
  }
  return ret;
}
|
||||||
|
|
||||||
|
// Trims memory at |base_address| to given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns 0 and frees memory at
// |base_address|.
uintptr_t TrimMapping(uintptr_t base_address,
                      size_t base_length,
                      size_t trim_length,
                      uintptr_t alignment,
                      uintptr_t alignment_offset,
                      PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(base_length >= trim_length);
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(alignment_offset < alignment);
  // First suitably-aligned address at or above |base_address|.
  uintptr_t new_base =
      NextAlignedWithOffset(base_address, alignment, alignment_offset);
  PA_DCHECK(new_base >= base_address);
  // Bytes to shave off the front (|pre_slack|) and the back (|post_slack|)
  // so that exactly |trim_length| aligned bytes remain.
  size_t pre_slack = new_base - base_address;
  size_t post_slack = base_length - pre_slack - trim_length;
  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
  PA_DCHECK(pre_slack < base_length);
  PA_DCHECK(post_slack < base_length);
  return internal::TrimMappingInternal(base_address, base_length, trim_length,
                                       accessibility, pre_slack, post_slack);
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Align |address| up to the closest, non-smaller address, that gives
|
||||||
|
// |requested_offset| remainder modulo |alignment|.
|
||||||
|
//
|
||||||
|
// Examples for alignment=1024 and requested_offset=64:
|
||||||
|
// 64 -> 64
|
||||||
|
// 65 -> 1088
|
||||||
|
// 1024 -> 1088
|
||||||
|
// 1088 -> 1088
|
||||||
|
// 1089 -> 2112
|
||||||
|
// 2048 -> 2112
|
||||||
|
uintptr_t NextAlignedWithOffset(uintptr_t address,
                                uintptr_t alignment,
                                uintptr_t requested_offset) {
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(requested_offset < alignment);

  // Distance (mod |alignment|) from |address| to the next address whose
  // remainder modulo |alignment| equals |requested_offset|. Unsigned
  // arithmetic makes the wrap-around case come out correctly.
  const uintptr_t current_offset = address & (alignment - 1);
  uintptr_t delta = requested_offset - current_offset;
  if (current_offset > requested_offset)
    delta += alignment;
  const uintptr_t result = address + delta;
  PA_DCHECK(result >= address);
  PA_DCHECK(result - address < alignment);
  PA_DCHECK(result % alignment == requested_offset);

  return result;
}
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
// Thin wrapper over the per-platform allocation primitive that also keeps the
// process-wide g_total_mapped_address_space counter up to date.
uintptr_t SystemAllocPages(uintptr_t hint,
                           size_t length,
                           PageAccessibilityConfiguration accessibility,
                           PageTag page_tag) {
  // Both the hint and the length must be allocation-granularity-aligned.
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
  uintptr_t ret =
      internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
  if (ret)
    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);

  return ret;
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
// Convenience overload: no address hint, no alignment offset.
uintptr_t AllocPages(size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag) {
  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
                                   page_tag);
}
// Convenience overload: address hint, but no alignment offset.
uintptr_t AllocPages(uintptr_t address,
                     size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag) {
  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
                                   page_tag);
}
// void* adapter around the uintptr_t overload above.
void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration accessibility,
                 PageTag page_tag) {
  return reinterpret_cast<void*>(
      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
                 accessibility, page_tag));
}
|
||||||
|
|
||||||
|
// Allocates |length| bytes such that the result has |align_offset| remainder
// modulo |align|. First tries a few exact-size allocations at suitable hints;
// if those fail, over-allocates and trims the mapping down to the aligned
// region. Returns 0 on failure.
uintptr_t AllocPagesWithAlignOffset(
    uintptr_t address,
    size_t length,
    size_t align,
    size_t align_offset,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag) {
  PA_DCHECK(length >= internal::PageAllocationGranularity());
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(align >= internal::PageAllocationGranularity());
  // Alignment must be power of 2 for masking math to work.
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
  PA_DCHECK(align_offset < align);
  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
  uintptr_t align_offset_mask = align - 1;
  uintptr_t align_base_mask = ~align_offset_mask;
  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);

  // If the client passed null as the address, choose a good one.
  if (!address) {
    address = (GetRandomPageBase() & align_base_mask) + align_offset;
  }

  // First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
  // On 32 bit systems, first try one random aligned address, and then try an
  // aligned address derived from the value of |ret|.
  constexpr int kExactSizeTries = 2;
#else
  // On 64 bit systems, try 3 random aligned addresses.
  constexpr int kExactSizeTries = 3;
#endif

  for (int i = 0; i < kExactSizeTries; ++i) {
    uintptr_t ret =
        AllocPagesIncludingReserved(address, length, accessibility, page_tag);
    if (ret) {
      // If the alignment is to our liking, we're done.
      if ((ret & align_offset_mask) == align_offset)
        return ret;
      // Free the memory and try again.
      FreePages(ret, length);
    } else {
      // |ret| is null; if this try was unhinted, we're OOM.
      if (internal::kHintIsAdvisory || !address)
        return 0;
    }

#if defined(ARCH_CPU_32_BITS)
    // For small address spaces, try the first aligned address >= |ret|. Note
    // |ret| may be null, in which case |address| becomes null. If
    // |align_offset| is non-zero, this calculation may get us not the first,
    // but the next matching address.
    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
#else  // defined(ARCH_CPU_64_BITS)
    // Keep trying random addresses on systems that have a large address space.
    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
#endif
  }

  // Make a larger allocation so we can force alignment.
  size_t try_length = length + (align - internal::PageAllocationGranularity());
  PA_CHECK(try_length >= length);
  uintptr_t ret;

  do {
    // Continue randomizing only on POSIX.
    address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
    ret = AllocPagesIncludingReserved(address, try_length, accessibility,
                                      page_tag);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
                                     align_offset, accessibility)) == 0);

  return ret;
}
|
||||||
|
|
||||||
|
void FreePages(uintptr_t address, size_t length) {
|
||||||
|
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
internal::FreePagesInternal(address, length);
|
||||||
|
PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
|
||||||
|
g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
void FreePages(void* address, size_t length) {
|
||||||
|
FreePages(reinterpret_cast<uintptr_t>(address), length);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool TrySetSystemPagesAccess(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
return internal::TrySetSystemPagesAccessInternal(address, length,
|
||||||
|
accessibility);
|
||||||
|
}
|
||||||
|
bool TrySetSystemPagesAccess(void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
|
||||||
|
accessibility);
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetSystemPagesAccess(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
internal::SetSystemPagesAccessInternal(address, length, accessibility);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition) {
|
||||||
|
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
internal::DecommitSystemPagesInternal(address, length,
|
||||||
|
accessibility_disposition);
|
||||||
|
}
|
||||||
|
void DecommitSystemPages(
|
||||||
|
void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition) {
|
||||||
|
DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
|
||||||
|
accessibility_disposition);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
|
||||||
|
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
internal::DecommitAndZeroSystemPagesInternal(address, length);
|
||||||
|
}
|
||||||
|
void DecommitAndZeroSystemPages(void* address, size_t length) {
|
||||||
|
DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
|
||||||
|
}
|
||||||
|
|
||||||
|
void RecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition) {
|
||||||
|
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
|
||||||
|
internal::RecommitSystemPagesInternal(address, length, accessibility,
|
||||||
|
accessibility_disposition);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool TryRecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition) {
|
||||||
|
// Duplicated because we want errors to be reported at a lower level in the
|
||||||
|
// crashing case.
|
||||||
|
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
|
||||||
|
return internal::TryRecommitSystemPagesInternal(
|
||||||
|
address, length, accessibility, accessibility_disposition);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DiscardSystemPages(uintptr_t address, size_t length) {
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
internal::DiscardSystemPagesInternal(address, length);
|
||||||
|
}
|
||||||
|
void DiscardSystemPages(void* address, size_t length) {
|
||||||
|
DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ReserveAddressSpace(size_t size) {
|
||||||
|
// To avoid deadlock, call only SystemAllocPages.
|
||||||
|
internal::ScopedGuard guard(GetReserveLock());
|
||||||
|
if (!s_reservation_address) {
|
||||||
|
uintptr_t mem = internal::SystemAllocPages(
|
||||||
|
0, size, PageAccessibilityConfiguration::kInaccessible,
|
||||||
|
PageTag::kChromium);
|
||||||
|
if (mem) {
|
||||||
|
// We guarantee this alignment when reserving address space.
|
||||||
|
PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
s_reservation_address = mem;
|
||||||
|
s_reservation_size = size;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ReleaseReservation() {
|
||||||
|
// To avoid deadlock, call only FreePages.
|
||||||
|
internal::ScopedGuard guard(GetReserveLock());
|
||||||
|
if (!s_reservation_address)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
FreePages(s_reservation_address, s_reservation_size);
|
||||||
|
s_reservation_address = 0;
|
||||||
|
s_reservation_size = 0;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool HasReservationForTesting() {
|
||||||
|
internal::ScopedGuard guard(GetReserveLock());
|
||||||
|
return s_reservation_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t GetAllocPageErrorCode() {
|
||||||
|
return internal::s_allocPageErrorCode;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t GetTotalMappedSize() {
|
||||||
|
return g_total_mapped_address_space;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
332
src/base/allocator/partition_allocator/page_allocator.h
Normal file
332
src/base/allocator/partition_allocator/page_allocator.h
Normal file
@ -0,0 +1,332 @@
|
|||||||
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
enum class PageAccessibilityConfiguration {
|
||||||
|
kInaccessible,
|
||||||
|
kRead,
|
||||||
|
kReadWrite,
|
||||||
|
// This flag is mapped to kReadWrite on systems that
|
||||||
|
// don't support MTE.
|
||||||
|
kReadWriteTagged,
|
||||||
|
// This flag is mapped to kReadExecute on systems
|
||||||
|
// that don't support Arm's BTI.
|
||||||
|
kReadExecuteProtected,
|
||||||
|
kReadExecute,
|
||||||
|
// This flag is deprecated and will go away soon.
|
||||||
|
// TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
|
||||||
|
kReadWriteExecute,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Use for De/RecommitSystemPages API.
|
||||||
|
enum class PageAccessibilityDisposition {
|
||||||
|
// Enforces permission update (Decommit will set to
|
||||||
|
// PageAccessibilityConfiguration::kInaccessible;
|
||||||
|
// Recommit will set to whatever was requested, other than
|
||||||
|
// PageAccessibilityConfiguration::kInaccessible).
|
||||||
|
kRequireUpdate,
|
||||||
|
// Will not update permissions, if the platform supports that (POSIX & Fuchsia
|
||||||
|
// only).
|
||||||
|
kAllowKeepForPerf,
|
||||||
|
};
|
||||||
|
|
||||||
|
// macOS supports tagged memory regions, to help in debugging. On Android,
|
||||||
|
// these tags are used to name anonymous mappings.
|
||||||
|
enum class PageTag {
|
||||||
|
kFirst = 240, // Minimum tag value.
|
||||||
|
kBlinkGC = 252, // Blink GC pages.
|
||||||
|
kPartitionAlloc = 253, // PartitionAlloc, no matter the partition.
|
||||||
|
kChromium = 254, // Chromium page.
|
||||||
|
kV8 = 255, // V8 heap pages.
|
||||||
|
kLast = kV8 // Maximum tag value.
|
||||||
|
};
|
||||||
|
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
uintptr_t NextAlignedWithOffset(uintptr_t ptr,
|
||||||
|
uintptr_t alignment,
|
||||||
|
uintptr_t requested_offset);
|
||||||
|
|
||||||
|
// Allocates one or more pages.
|
||||||
|
//
|
||||||
|
// The requested |address| is just a hint; the actual address returned may
|
||||||
|
// differ. The returned address will be aligned to |align_offset| modulo |align|
|
||||||
|
// bytes.
|
||||||
|
//
|
||||||
|
// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
|
||||||
|
// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
|
||||||
|
// |align_offset| must be less than |align|. |align| must be a power of two.
|
||||||
|
//
|
||||||
|
// If |address| is 0/nullptr, then a suitable and randomized address will be
|
||||||
|
// chosen automatically.
|
||||||
|
//
|
||||||
|
// |accessibility| controls the permission of the allocated pages.
|
||||||
|
// PageAccessibilityConfiguration::kInaccessible means uncommitted.
|
||||||
|
//
|
||||||
|
// |page_tag| is used on some platforms to identify the source of the
|
||||||
|
// allocation. Use PageTag::kChromium as a catch-all category.
|
||||||
|
//
|
||||||
|
// This call will return 0/nullptr if the allocation cannot be satisfied.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
uintptr_t AllocPages(size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
uintptr_t AllocPages(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void* AllocPages(void* address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
uintptr_t AllocPagesWithAlignOffset(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
size_t align_offset,
|
||||||
|
PageAccessibilityConfiguration page_accessibility,
|
||||||
|
PageTag page_tag);
|
||||||
|
|
||||||
|
// Frees one or more pages starting at |address| and continuing for |length|
|
||||||
|
// bytes.
|
||||||
|
//
|
||||||
|
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
|
||||||
|
// |address| must be aligned to |PageAllocationGranularity()| bytes, and
|
||||||
|
// |length| must be a multiple of |PageAllocationGranularity()|.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void FreePages(uintptr_t address, size_t length);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void FreePages(void* address, size_t length);
|
||||||
|
|
||||||
|
// Marks one or more system pages, starting at |address| with the given
|
||||||
|
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
|
||||||
|
// bytes.
|
||||||
|
//
|
||||||
|
// Returns true if the permission change succeeded. In most cases you must
|
||||||
|
// |CHECK| the result.
|
||||||
|
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility);
|
||||||
|
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
|
||||||
|
void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility);
|
||||||
|
|
||||||
|
// Marks one or more system pages, starting at |address| with the given
|
||||||
|
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
|
||||||
|
// bytes.
|
||||||
|
//
|
||||||
|
// Performs a CHECK that the operation succeeds.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void SetSystemPagesAccess(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void SetSystemPagesAccess(void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility);
|
||||||
|
|
||||||
|
// Decommits one or more system pages starting at |address| and continuing for
|
||||||
|
// |length| bytes. |address| and |length| must be aligned to a system page
|
||||||
|
// boundary.
|
||||||
|
//
|
||||||
|
// This API will crash if the operation cannot be performed!
|
||||||
|
//
|
||||||
|
// If disposition is PageAccessibilityDisposition::kRequireUpdate (recommended),
|
||||||
|
// the decommitted pages will be made inaccessible before the call returns.
|
||||||
|
// While it is always a programming error to access decommitted pages without
|
||||||
|
// first recommitting them, callers may use
|
||||||
|
// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
|
||||||
|
// to skip changing permissions (use with care), for performance reasons (see
|
||||||
|
// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
|
||||||
|
// in the past). Implementations may choose to always modify permissions, hence
|
||||||
|
// accessing those pages may or may not trigger a fault.
|
||||||
|
//
|
||||||
|
// Decommitting means that physical resources (RAM or swap/pagefile) backing the
|
||||||
|
// allocated virtual address range may be released back to the system, but the
|
||||||
|
// address space is still allocated to the process (possibly using up page table
|
||||||
|
// entries or other accounting resources). There is no guarantee that the pages
|
||||||
|
// are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
|
||||||
|
//
|
||||||
|
// This operation may not be atomic on some platforms.
|
||||||
|
//
|
||||||
|
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
|
||||||
|
// processes will not fault when touching a committed memory region. There is
|
||||||
|
// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
|
||||||
|
// best-effort allocated resources on the first touch. If
|
||||||
|
// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
|
||||||
|
// behaves in a platform-agnostic way by simulating the Windows "decommit" state
|
||||||
|
// by both discarding the region (allowing the OS to avoid swap operations)
|
||||||
|
// *and* changing the page protections so accesses fault.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DecommitSystemPages(
|
||||||
|
void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition);
|
||||||
|
|
||||||
|
// Decommits one or more system pages starting at |address| and continuing for
|
||||||
|
// |length| bytes. |address| and |length| must be aligned to a system page
|
||||||
|
// boundary.
|
||||||
|
//
|
||||||
|
// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
|
||||||
|
// zeroed and will always mark the region as inaccessible (the equivalent of
|
||||||
|
// setting them to PageAccessibilityConfiguration::kInaccessible).
|
||||||
|
//
|
||||||
|
// This API will crash if the operation cannot be performed.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DecommitAndZeroSystemPages(uintptr_t address, size_t length);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DecommitAndZeroSystemPages(void* address, size_t length);
|
||||||
|
|
||||||
|
// Whether decommitted memory is guaranteed to be zeroed when it is
|
||||||
|
// recommitted. Do not assume that this will not change over time.
|
||||||
|
constexpr PA_COMPONENT_EXPORT(
|
||||||
|
PARTITION_ALLOC) bool DecommittedMemoryIsAlwaysZeroed() {
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
return false;
|
||||||
|
#else
|
||||||
|
return true;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
// (Re)Commits one or more system pages, starting at |address| and continuing
|
||||||
|
// for |length| bytes with the given |page_accessibility| (must not be
|
||||||
|
// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
|
||||||
|
// must be aligned to a system page boundary.
|
||||||
|
//
|
||||||
|
// This API will crash if the operation cannot be performed!
|
||||||
|
//
|
||||||
|
// If disposition is PageAccessibilityConfiguration::kRequireUpdate, the calls
|
||||||
|
// updates the pages to |page_accessibility|. This can be used regardless of
|
||||||
|
// what disposition was used to decommit the pages.
|
||||||
|
// PageAccessibilityConfiguration::kAllowKeepForPerf allows the implementation
|
||||||
|
// to leave the page permissions, if that improves performance. This option can
|
||||||
|
// only be used if the pages were previously accessible and decommitted with
|
||||||
|
// that same option.
|
||||||
|
//
|
||||||
|
// The memory will be zeroed when it is committed for the first time. However,
|
||||||
|
// there is no such guarantee when memory is recommitted, unless
|
||||||
|
// |DecommittedMemoryIsAlwaysZeroed()| is true.
|
||||||
|
//
|
||||||
|
// This operation may not be atomic on some platforms.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void RecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition);
|
||||||
|
|
||||||
|
// Like RecommitSystemPages(), but returns false instead of crashing.
|
||||||
|
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TryRecommitSystemPages(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration page_accessibility,
|
||||||
|
PageAccessibilityDisposition accessibility_disposition);
|
||||||
|
|
||||||
|
// Discard one or more system pages starting at |address| and continuing for
|
||||||
|
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
|
||||||
|
//
|
||||||
|
// Discarding is a hint to the system that the page is no longer required. The
|
||||||
|
// hint may:
|
||||||
|
// - Do nothing.
|
||||||
|
// - Discard the page immediately, freeing up physical pages.
|
||||||
|
// - Discard the page at some time in the future in response to memory
|
||||||
|
// pressure.
|
||||||
|
//
|
||||||
|
// Only committed pages should be discarded. Discarding a page does not decommit
|
||||||
|
// it, and it is valid to discard an already-discarded page. A read or write to
|
||||||
|
// a discarded page will not fault.
|
||||||
|
//
|
||||||
|
// Reading from a discarded page may return the original page content, or a page
|
||||||
|
// full of zeroes.
|
||||||
|
//
|
||||||
|
// Writing to a discarded page is the only guaranteed way to tell the system
|
||||||
|
// that the page is required again. Once written to, the content of the page is
|
||||||
|
// guaranteed stable once more. After being written to, the page content may be
|
||||||
|
// based on the original page content, or a page of zeroes.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DiscardSystemPages(uintptr_t address, size_t length);
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
void DiscardSystemPages(void* address, size_t length);
|
||||||
|
|
||||||
|
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
|
||||||
|
// 0 for an |address| of 0.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
RoundUpToSystemPage(uintptr_t address) {
|
||||||
|
return (address + internal::SystemPageOffsetMask()) &
|
||||||
|
internal::SystemPageBaseMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
|
||||||
|
// 0 for an |address| of 0.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
RoundDownToSystemPage(uintptr_t address) {
|
||||||
|
return address & internal::SystemPageBaseMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
|
||||||
|
// Returns 0 for an |address| of 0.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
RoundUpToPageAllocationGranularity(uintptr_t address) {
|
||||||
|
return (address + internal::PageAllocationGranularityOffsetMask()) &
|
||||||
|
internal::PageAllocationGranularityBaseMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rounds down |address| to the previous multiple of
|
||||||
|
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||||
|
RoundDownToPageAllocationGranularity(uintptr_t address) {
|
||||||
|
return address & internal::PageAllocationGranularityBaseMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reserves (at least) |size| bytes of address space, aligned to
|
||||||
|
// |PageAllocationGranularity()|. This can be called early on to make it more
|
||||||
|
// likely that large allocations will succeed. Returns true if the reservation
|
||||||
|
// succeeded, false if the reservation failed or a reservation was already made.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReserveAddressSpace(size_t size);
|
||||||
|
|
||||||
|
// Releases any reserved address space. |AllocPages| calls this automatically on
|
||||||
|
// an allocation failure. External allocators may also call this on failure.
|
||||||
|
//
|
||||||
|
// Returns true when an existing reservation was released.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReleaseReservation();
|
||||||
|
|
||||||
|
// Returns true if there is currently an address space reservation.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasReservationForTesting();
|
||||||
|
|
||||||
|
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
|
||||||
|
// (POSIX) or |VirtualAlloc| (Windows) fails.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetAllocPageErrorCode();
|
||||||
|
|
||||||
|
// Returns the total amount of mapped pages from all clients of
|
||||||
|
// PageAllocator. These pages may or may not be committed. This is mostly useful
|
||||||
|
// to assess address space pressure.
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
|
||||||
|
|
||||||
|
} // namespace partition_alloc
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
|
@ -0,0 +1,169 @@
|
|||||||
|
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
|
||||||
|
|
||||||
|
#include <mach/vm_page_size.h>
|
||||||
|
|
||||||
|
// Although page allocator constants are not constexpr, they are run-time
|
||||||
|
// constant. Because the underlying variables they access, such as vm_page_size,
|
||||||
|
// are not marked const, the compiler normally has no way to know that they
|
||||||
|
// don’t change and must obtain their values whenever it can't prove that they
|
||||||
|
// haven't been modified, even if they had already been obtained previously.
|
||||||
|
// Attaching __attribute__((const)) to these declarations allows these redundant
|
||||||
|
// accesses to be omitted under optimization such as common subexpression
|
||||||
|
// elimination.
|
||||||
|
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
|
||||||
|
|
||||||
|
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
|
||||||
|
// This should work for all POSIX (if needed), but currently all other
|
||||||
|
// supported OS/architecture combinations use either hard-coded values
|
||||||
|
// (such as x86) or have means to determine these values without needing
|
||||||
|
// atomics (such as macOS on arm64).
|
||||||
|
|
||||||
|
// Page allocator constants are run-time constant
|
||||||
|
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
|
||||||
|
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <atomic>
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
// Holds the current page size and shift, where size = 1 << shift
|
||||||
|
// Use PageAllocationGranularity(), PageAllocationGranularityShift()
|
||||||
|
// to initialize and retrieve these values safely.
|
||||||
|
struct PageCharacteristics {
|
||||||
|
std::atomic<size_t> size;
|
||||||
|
std::atomic<size_t> shift;
|
||||||
|
};
|
||||||
|
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||||
|
extern PageCharacteristics page_characteristics;
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
// When defined, page size constants are fixed at compile time. When not
|
||||||
|
// defined, they may vary at run time.
|
||||||
|
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
|
||||||
|
|
||||||
|
// Use this macro to declare a function as constexpr or not based on whether
|
||||||
|
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
|
||||||
|
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
// Forward declaration, implementation below
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
PageAllocationGranularity();
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
PageAllocationGranularityShift() {
|
||||||
|
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
|
||||||
|
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
|
||||||
|
// sizes. Since 64kB is the de facto standard on the platform and binaries
|
||||||
|
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
|
||||||
|
// here.
|
||||||
|
return 16; // 64kB
|
||||||
|
#elif defined(_MIPS_ARCH_LOONGSON)
|
||||||
|
return 14; // 16kB
|
||||||
|
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
|
||||||
|
return static_cast<size_t>(vm_page_shift);
|
||||||
|
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
|
||||||
|
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
|
||||||
|
// page sizes. Retrieve from or initialize cache.
|
||||||
|
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
|
||||||
|
if (PA_UNLIKELY(shift == 0)) {
|
||||||
|
shift = static_cast<size_t>(
|
||||||
|
__builtin_ctz((unsigned int)PageAllocationGranularity()));
|
||||||
|
page_characteristics.shift.store(shift, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
return shift;
|
||||||
|
#else
|
||||||
|
return 12; // 4kB
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
PageAllocationGranularity() {
|
||||||
|
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
|
||||||
|
// This is literally equivalent to |1 << PageAllocationGranularityShift()|
|
||||||
|
// below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
|
||||||
|
return vm_page_size;
|
||||||
|
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
|
||||||
|
// arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
|
||||||
|
// initialize cache.
|
||||||
|
size_t size = page_characteristics.size.load(std::memory_order_relaxed);
|
||||||
|
if (PA_UNLIKELY(size == 0)) {
|
||||||
|
size = static_cast<size_t>(getpagesize());
|
||||||
|
page_characteristics.size.store(size, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
return size;
|
||||||
|
#else
|
||||||
|
return 1 << PageAllocationGranularityShift();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
PageAllocationGranularityOffsetMask() {
|
||||||
|
return PageAllocationGranularity() - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
PageAllocationGranularityBaseMask() {
|
||||||
|
return ~PageAllocationGranularityOffsetMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
SystemPageShift() {
|
||||||
|
// On Windows allocation granularity is higher than the page size. This comes
|
||||||
|
// into play when reserving address space range (allocation granularity),
|
||||||
|
// compared to committing pages into memory (system page granularity).
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
return 12; // 4096=1<<12
|
||||||
|
#else
|
||||||
|
return PageAllocationGranularityShift();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
SystemPageSize() {
|
||||||
|
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
|
||||||
|
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
|
||||||
|
// This is literally equivalent to |1 << SystemPageShift()| below, but was
|
||||||
|
// separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
|
||||||
|
// non-constexpr.
|
||||||
|
return PageAllocationGranularity();
|
||||||
|
#else
|
||||||
|
return 1 << SystemPageShift();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
SystemPageOffsetMask() {
|
||||||
|
return SystemPageSize() - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||||
|
SystemPageBaseMask() {
|
||||||
|
return ~SystemPageOffsetMask();
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page.
|
||||||
|
constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
|
@ -0,0 +1,22 @@
|
|||||||
|
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
|
||||||
|
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
uintptr_t SystemAllocPages(uintptr_t hint,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag);
|
||||||
|
|
||||||
|
} // namespace partition_alloc::internal
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user