mirror of https://github.com/klzgrad/naiveproxy.git
Import chromium-104.0.5112.79
commit a092302d78
src/.clang-format (Normal file, 39 lines)
@@ -0,0 +1,39 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11

# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
#   IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"

# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']
src/.gitattributes (vendored, Normal file, 58 lines)
@@ -0,0 +1,58 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf

# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf

# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.rs text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.toml text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ENG_REVIEW_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf

# Skip Tricium by default on files in third_party.
third_party/** -tricium
src/.gn (Normal file, 172 lines)
@@ -0,0 +1,172 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.

import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")

# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"

# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"

# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
  # TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
  # a warning that "Build argument has no effect". When adding a v8 variable, it
  # also needs to be defined to src/ios/BUILD.gn (respectively removed from both
  # locations when it is removed).

  v8_enable_gdbjit = false
  v8_imminent_deprecation_warnings = false

  # Don't include webrtc's builtin task queue implementation.
  rtc_link_task_queue_impl = false

  # Don't include the iLBC audio codec.
  # TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
  # deps on codecs, we can remove this.
  rtc_include_ilbc = false

  # Changes some setup for the Crashpad build to set them to build against
  # Chromium's zlib, base, etc.
  crashpad_dependencies = "chromium"

  # Override ANGLE's Vulkan dependencies.
  angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
  angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
  angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
  angle_vulkan_validation_layers_dir =
      "//third_party/vulkan-deps/vulkan-validation-layers/src"

  # Overwrite default args declared in the Fuchsia sdk
  fuchsia_sdk_readelf_exec =
      "//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
  fuchsia_target_api_level = 8

  devtools_visibility = [ "*" ]
}

# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
  "//headless:headless_non_renderer",  # 9 errors
  "//headless:headless_renderer",  # 13 errors
  "//headless:headless_shared_sources",  # 4 errors
  "//headless:headless_shell_browser_lib",  # 10 errors
  "//headless:headless_shell_lib",  # 10 errors

  # //v8, https://crbug.com/v8/7330
  "//v8/src/inspector:inspector",  # 20 errors
  "//v8/test/cctest:cctest_sources",  # 2 errors
  "//v8:cppgc_base",  # 1 error
  "//v8:v8_internal_headers",  # 11 errors
  "//v8:v8_libplatform",  # 2 errors

  # After making partition_alloc a standalone library, remove partition_alloc
  # target from the skip list, because partition_alloc will depend on its own
  # base.
  # partition alloc standalone library bug is https://crbug.com/1151236.
  "//base/allocator/partition_allocator:partition_alloc",  # 292 errors
]

# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
#   enable a different mode. Instead, use GN build args to enable or disable
#   functionality and set options. An example is checking for a file in the
#   src-internal repo to see if the corresponding src-internal feature should
#   be enabled. There are several things that can go wrong with this:
#
#   - It's mysterious what causes some things to happen. Although in many cases
#     such behavior can be conveniently automatic, GN optimizes for explicit
#     and obvious behavior so people can more easily diagnose problems.
#
#   - The user can't enable a mode for one build and not another. With GN build
#     args, the user can choose the exact configuration of multiple builds
#     using one checkout. But by implicitly basing flags on the state of the
#     checkout, this functionality is broken.
#
#   - It's easy to get stale files. If for example the user edits the gclient
#     to stop checking out src-internal (or any other optional thing), it's
#     easy to end up with stale files still mysteriously triggering build
#     conditions that are no longer appropriate (yes, this happens in real
#     life).
#
# - Do not use a script to iterate files in a directory (glob):
#
#   - This has the same "stale file" problem as the above discussion. Various
#     operations can leave untracked files in the source tree which can cause
#     surprising effects.
#
#   - It becomes impossible to use "git grep" to find where a certain file is
#     referenced. This operation is very common and people really do get
#     confused when things aren't listed.
#
#   - It's easy to screw up. One common case is a build-time script that packs
#     up a directory. The author notices that the script isn't re-run when the
#     directory is updated, so adds a glob so all the files are listed as
#     inputs. This seems to work great... until a file is deleted. When a
#     file is deleted, all the inputs the glob lists will still be up to date
#     and no command-lines will have been changed. The action will not be
#     re-run and the build will be broken. It is possible to get this correct
#     using glob, and it's possible to mess it up without glob, but globs make
#     this situation much easier to create. If the build always lists the
#     files and passes them to a script, it will always be correct.

exec_script_whitelist =
    build_dotfile_settings.exec_script_whitelist +
    angle_dotfile_settings.exec_script_whitelist +
    [
      # Whitelist entries for //build should go into
      # //build/dotfile_settings.gni instead, so that they can be shared
      # with other repos. The entries in this list should be only for files
      # in the Chromium repo outside of //build.
      "//build_overrides/build.gni",

      "//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",

      # TODO(dgn): Layer violation but breaks the build otherwise, see
      # https://crbug.com/474506.
      "//clank/java/BUILD.gn",
      "//clank/native/BUILD.gn",

      "//google_apis/BUILD.gn",
      "//printing/BUILD.gn",

      "//remoting/host/installer/linux/BUILD.gn",
      "//remoting/remoting_version.gni",
      "//remoting/host/installer/win/generate_clsids.gni",

      "//tools/grit/grit_rule.gni",
      "//tools/gritsettings/BUILD.gn",
    ]
src/AUTHORS (Normal file, 1423 lines)
File diff suppressed because it is too large
src/BUILD.gn (Normal file, 1707 lines)
File diff suppressed because it is too large
src/LICENSE (Normal file, 27 lines)
@@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
src/base/BUILD.gn (Normal file, 4619 lines)
File diff suppressed because it is too large
src/base/DEPS (Normal file, 27 lines)
@@ -0,0 +1,27 @@
include_rules = [
  "+third_party/ashmem",
  "+third_party/apple_apsl",
  "+third_party/boringssl/src/include",
  "+third_party/ced",
  "+third_party/libunwindstack/src/libunwindstack/include",
  "+third_party/lss",
  "+third_party/modp_b64",
  "+third_party/perfetto/include",
  "+third_party/perfetto/protos/perfetto",
  # Conversions between base and Rust types (e.g. base::span <-> rust::Slice)
  # require the cxx.h header from cxx. This is only used if Rust is enabled
  # in the gn build; see //base/BUILD.gn's conditional dependency on
  # //build/rust:cxx_cppdeps.
  "+third_party/rust/cxx",
  "+third_party/test_fonts",

  # These are implicitly brought in from the root, and we don't want them.
  "-ipc",
  "-url",

  # ICU dependencies must be separate from the rest of base.
  "-i18n",

  # //base/util can use //base but not vice versa.
  "-util",
]
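For readers unfamiliar with the checkdeps rule syntax: a `+` prefix allows includes under that path, a `-` prefix forbids them. A hedged C++ illustration of how the rules above read (the boringssl header is just an example of a path the rules permit, not something //base is required to include):

```cpp
// Allowed in //base by the "+third_party/boringssl/src/include" rule above
// (this exact include appears in base/hash/sha1_boringssl.cc):
#include "third_party/boringssl/src/include/openssl/sha.h"

// Rejected by checkdeps under the "-url" rule: //base must not depend on
// //url, even though the root DEPS would otherwise make it visible.
// #include "url/gurl.h"
```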
src/base/DIR_METADATA (Normal file, 3 lines)
@@ -0,0 +1,3 @@
monorail {
  component: "Internals>Core"
}
src/base/OWNERS (Normal file, 40 lines)
@@ -0,0 +1,40 @@
# See //base/README.md to find qualification for being an owner.

set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# by emailing lsc-policy@chromium.org when this list changes.

# per-file rules:
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*

# For Android-specific changes:
per-file ..._android*=file://base/android/OWNERS

# For Fuchsia-specific changes:
per-file ..._fuchsia*=file://build/fuchsia/OWNERS

# For Windows-specific changes:
per-file ..._win*=file://base/win/OWNERS

per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org

# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS

per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS
src/base/PRESUBMIT.py (Normal file, 163 lines)
@@ -0,0 +1,163 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Chromium presubmit script for src/base.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""


USE_PYTHON3 = True


def CheckChangeLintsClean(input_api, output_api):
  """Makes sure that the code is cpplint clean."""
  # lint_filters=[] stops the OFF_BY_DEFAULT_LINT_FILTERS from being disabled,
  # finding many more issues. verbose_level=1 finds a small number of
  # additional issues.
  # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
  # Only process those extensions which are used in Chromium, in directories
  # that currently lint clean.
  CLEAN_CPP_FILES_ONLY = (r'base[\\/]win[\\/].*\.(cc|h)$', )
  source_file_filter = lambda x: input_api.FilterSourceFile(
      x,
      files_to_check=CLEAN_CPP_FILES_ONLY,
      files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
  return input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, source_file_filter=source_file_filter,
      lint_filters=[], verbose_level=1)


def _CheckNoInterfacesInBase(input_api, output_api):
  """Checks to make sure no files in libbase.a have |@interface|."""
  pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
  files = []
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if (f.LocalPath().startswith('base/') and
        not "/ios/" in f.LocalPath() and
        not "/test/" in f.LocalPath() and
        not f.LocalPath().endswith('.java') and
        not f.LocalPath().endswith('_unittest.mm') and
        not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
      contents = input_api.ReadFile(f)
      if pattern.search(contents):
        files.append(f)

  if len(files):
    return [ output_api.PresubmitError(
        'Objective-C interfaces or categories are forbidden in libbase. ' +
        'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
        'browse_thread/thread/efb28c10435987fd',
        files) ]
  return []


def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
  """Returns locations matching one of the search_regexes."""
  def FilterFile(affected_file):
    return input_api.FilterSourceFile(
        affected_file,
        files_to_check=files_to_check,
        files_to_skip=files_to_skip)

  no_presubmit = r"// no-presubmit-check"
  locations = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      for search_regex in search_regexes:
        if (input_api.re.search(search_regex, line) and
            not input_api.re.search(no_presubmit, line)):
          locations.append("  %s:%d" % (f.LocalPath(), line_num))
          break
  return locations


def _CheckNoTraceEventInclude(input_api, output_api):
  """Verify that //base includes base_tracing.h instead of trace event headers.

  Checks that files outside trace event implementation include the
  base_tracing.h header instead of specific trace event implementation headers
  to maintain compatibility with the gn flag "enable_base_tracing = false".
  """
  discouraged_includes = [
    r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
    r'^#include "third_party/perfetto/include/',
  ]

  files_to_check = [
    r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
    r".*[\\/]test[\\/].*",
    r".*[\\/]trace_event[\\/].*",
    r".*[\\/]tracing[\\/].*",
  ]

  locations = _FindLocations(input_api, discouraged_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitError(
        'Base code should include "base/trace_event/base_tracing.h" instead\n' +
        'of trace_event implementation headers. If you need to include an\n' +
        'implementation header, verify that "gn check" and base_unittests\n' +
        'still pass with gn arg "enable_base_tracing = false" and add\n' +
        '"// no-presubmit-check" after the include.\n' +
        '\n'.join(locations)) ]
  return []


def _WarnPbzeroIncludes(input_api, output_api):
  """Warn to check enable_base_tracing=false when including a pbzero header.

  Emits a warning when including a perfetto pbzero header, encouraging the
  user to verify that //base still builds with enable_base_tracing=false.
  """
  warn_includes = [
    r'^#include "third_party/perfetto/protos/',
    r'^#include "base/tracing/protos/',
  ]

  files_to_check = [
    r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
    r".*[\\/]test[\\/].*",
    r".*[\\/]trace_event[\\/].*",
    r".*[\\/]tracing[\\/].*",
  ]

  locations = _FindLocations(input_api, warn_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitPromptWarning(
        'Please verify that "gn check" and base_unittests still pass with\n' +
        'gn arg "enable_base_tracing = false" when adding typed trace\n' +
        'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
        'to exclude pbzero headers and anything not supported by\n' +
        '//base/trace_event/trace_event_stub.h.\n' +
        '\n'.join(locations)) ]
  return []


def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  results.extend(_CheckNoInterfacesInBase(input_api, output_api))
  results.extend(_CheckNoTraceEventInclude(input_api, output_api))
  results.extend(_WarnPbzeroIncludes(input_api, output_api))
  results.extend(CheckChangeLintsClean(input_api, output_api))
  return results


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results


def CheckChangeOnCommit(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results
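To illustrate what `_CheckNoTraceEventInclude` and `_WarnPbzeroIncludes` look for, here is a hedged sketch of includes in a hypothetical //base source file (the pbzero header path is an illustrative example, not a requirement):

```cpp
// OK: the indirection header that //base code is expected to use.
#include "base/trace_event/base_tracing.h"

// Flagged as an error by _CheckNoTraceEventInclude (a concrete trace_event
// implementation header), unless annotated to skip the check:
// #include "base/trace_event/trace_event.h"  // no-presubmit-check

// Flagged with a warning by _WarnPbzeroIncludes; such includes should be
// guarded so //base still builds with enable_base_tracing = false:
// #if BUILDFLAG(ENABLE_BASE_TRACING)
// #include "third_party/perfetto/protos/perfetto/trace/track_event/track_event.pbzero.h"
// #endif
```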
src/base/README.md (Normal file, 81 lines)
@@ -0,0 +1,81 @@
# What is this
Contains a written down set of principles and other information on //base.
Please add to it!

## About //base:

Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.

The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.

Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.

If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.

base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.

## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex c++ stuff
* inclination to always ask why and understand everything (including external
  interactions like win32) rather than just hoping the author did it right
* mentorship/experience
* demonstrated good judgement (esp with regards to public APIs) over a length
  of time

Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.

## Design and naming
* Be sure to use the base namespace.
* STL-like constructs should adhere as closely to STL as possible. Functions
  and behaviors not present in STL should only be added when they are related
  to the specific data structure implemented by the container.
* For STL-like constructs our policy is that they should use STL-like naming
  even when it may conflict with the style guide. So functions and class names
  should be lower case with underscores. Non-STL-like classes and functions
  should use Google naming.

## Performance testing

Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:

* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
  queue in isolation.
* ConditionVariablePerfTest: Measures thread switching cost of condition
  variables.
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
  machinery.
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
* MessageLoopPerfTest: Measures the speed of task posting in various
  configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
  pthreads.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
  underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
  observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
  multithreaded scenarios.

Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes or 3) changes in //base code
itself.
src/base/SECURITY_OWNERS (Normal file, 13 lines)
@@ -0,0 +1,13 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base; OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org
src/base/allocator/BUILD.gn (Normal file, 96 lines)
@@ -0,0 +1,96 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")

buildflag_header("buildflags") {
  header = "buildflags.h"
  _use_partition_alloc_as_malloc = use_allocator == "partition"
  assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
         "Partition alloc requires the allocator shim")

  # BackupRefPtr(BRP) build flags.
  _use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
  _put_ref_count_in_previous_slot =
      put_ref_count_in_previous_slot && _use_backup_ref_ptr
  _enable_backup_ref_ptr_slow_checks =
      enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
  _enable_dangling_raw_ptr_checks =
      enable_dangling_raw_ptr_checks && _use_backup_ref_ptr

  # MTECheckedPtr is exclusive against BRP (asserted at declaration).
  # MTECheckedPtr requires 64-bit pointers (not available in NaCl).
  _use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl

  flags = [
    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
    "USE_PARTITION_ALLOC=$use_partition_alloc",
    "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",

    "USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
    "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
    "ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",

    # Not to be used directly - see `partition_alloc_config.h`.
    "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",

    "USE_FAKE_BINARY_EXPERIMENT=$use_fake_binary_experiment",
  ]
}

if (is_apple) {
  source_set("early_zone_registration_mac") {
    sources = [
      "early_zone_registration_mac.cc",
      "early_zone_registration_mac.h",
    ]

    deps = [
      ":buildflags",
      "//base/allocator/partition_allocator:buildflags",
    ]
  }
}

# Used to shim malloc symbols on Android. See //base/allocator/README.md.
config("wrap_malloc_symbols") {
  ldflags = [
    "-Wl,-wrap,calloc",
    "-Wl,-wrap,free",
    "-Wl,-wrap,malloc",
    "-Wl,-wrap,memalign",
    "-Wl,-wrap,posix_memalign",
    "-Wl,-wrap,pvalloc",
    "-Wl,-wrap,realloc",
    "-Wl,-wrap,valloc",

    # <stdlib.h> functions
    "-Wl,-wrap,realpath",

    # <string.h> functions
    "-Wl,-wrap,strdup",
    "-Wl,-wrap,strndup",

    # <unistd.h> functions
    "-Wl,-wrap,getcwd",

    # <stdio.h> functions
    "-Wl,-wrap,asprintf",
    "-Wl,-wrap,vasprintf",
  ]
}

config("mac_no_default_new_delete_symbols") {
  if (!is_component_build) {
    # This is already set when we compile libc++, see
    # buildtools/third_party/libc++/BUILD.gn. But it needs to be set here as
    # well, since the shim defines the symbols, to prevent them being exported.
    cflags = [ "-fvisibility-global-new-delete-hidden" ]
  }
}
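For context on how these flags are consumed: the `buildflag_header` target above generates `buildflags.h`, which C++ code tests with the `BUILDFLAG()` macro; `allocator_check.cc` later in this commit follows exactly this pattern. A minimal sketch:

```cpp
// Minimal consumption sketch (mirrors the pattern used by
// base/allocator/allocator_check.cc in this commit).
#include "base/allocator/buildflags.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// In this configuration PartitionAlloc backs malloc/new everywhere.
#endif

#if BUILDFLAG(USE_ALLOCATOR_SHIM)
// The shim layer is linked in and intercepts allocator symbols.
#endif
```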
src/base/allocator/DIR_METADATA (Normal file, 3 lines)
@@ -0,0 +1,3 @@
monorail {
  component: "Internals"
}
src/base/allocator/OWNERS (Normal file, 8 lines)
@@ -0,0 +1,8 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org

per-file allocator.gni=bartekn@chromium.org
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org
per-file partition_alloc*=bartekn@chromium.org
per-file BUILD.gn=bartekn@chromium.org
src/base/allocator/README.md (Normal file, 172 lines)
@@ -0,0 +1,172 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.

Bear in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
 - Large parts of the renderer (Blink) use two home-brewed allocators,
   PartitionAlloc and BlinkGC (Oilpan).
 - Some subsystems, such as the V8 JavaScript engine, handle memory management
   autonomously.
 - Various parts of the codebase use abstractions such as `SharedMemory` or
   `DiscardableMemory` which, similarly to the above, have their own page-level
   memory management.

Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra-hooks which service calls to malloc/new. The relevant
build-time flags involved are `use_allocator` and `use_allocator_shim`.

The default choices are as follows:

**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.

**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).

**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.

In addition, when building for `asan` / `msan` both the allocator and the shim
layer are disabled.


Layering and build deps
-----------------------
The `allocator` target provides the linker flags required for the Windows shim
layer. The `base` target is (almost) the only one depending on `allocator`. No
other targets should depend on it, with the exception of the very few
executables / dynamic libraries that don't depend, either directly or
indirectly, on `base` within the scope of a linker unit.

More importantly, **no other place outside of `/base` should depend on the
specific allocator**.
If such a functional dependency is required, it should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`).

**Why does `base` depend on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.

Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) automatically get the correct
set of linker flags to pull in the Windows shim-layer (if needed).


Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.


Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
a central place.

 - Full documentation: [Allocator shim design doc][url-allocator-shim].
 - Current state: Available and enabled by default on Android, CrOS, Linux,
   Mac OS and Windows.
 - Tracking bug: [crbug.com/550886](https://crbug.com/550886).
 - Build-time flag: `use_allocator_shim`.

**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+    +-----------------------+    +----------------+
|     malloc & friends    | -> |       shim layer      | -> |   Routing to   |
|    symbols definition   |    |     implementation    |    |    allocator   |
+-------------------------+    +-----------------------+    +----------------+
| - libc symbols (malloc, |    | - Security checks     |    | - glibc        |
|   calloc, free, ...)    |    | - Chain of dispatchers|    | - Android      |
| - C++ symbols (operator |    |   that can intercept  |    |   bionic       |
|   new, delete, ...)     |    |   and override        |    | - WinHeap      |
| - glibc weak symbols    |    |   allocations         |    | - Partition    |
|   (__libc_malloc, ...)  |    +-----------------------+    |   Alloc        |
+-------------------------+                                 +----------------+
```

**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.

*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols that we
can override in `allocator_shim_override_ucr_symbols_win.h`.

*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third party libraries. Symbol resolution on Linux is a
breadth first search that starts from the root link unit, that is the
executable (see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats
Specification).
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).
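To make the mechanism concrete, below is a minimal standalone sketch of the same strong-symbol interposition on Linux/glibc. It is illustrative only, not Chromium code; `__libc_malloc` is a glibc-specific entry point:

```cpp
// Build on Linux with: g++ interpose.cc -o interpose
#include <stdio.h>
#include <stdlib.h>

// glibc's internal entry point for the real allocator (glibc-specific).
extern "C" void* __libc_malloc(size_t size);

// Our strong definition of malloc wins symbol resolution over glibc's weak
// one, for the executable and for libraries resolved after it.
extern "C" void* malloc(size_t size) {
  // A real shim would run security checks / dispatcher hooks here.
  return __libc_malloc(size);
}

int main() {
  void* p = malloc(32);  // Resolves to the definition above.
  printf("interposed malloc returned %p\n", p);
  free(p);  // glibc's free can release memory from __libc_malloc().
  return 0;
}
```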

*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case, the approach is instead to wrap symbol resolution at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
The use of this wrapping flag causes:
 - All references to allocator symbols in the Chrome codebase to be rewritten as
   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
   route allocator calls inside the shim layer.
 - The references to the original `malloc` symbols (which typically are defined
   by the system's libc.so) to be accessible via the special `__real_malloc` and
   friends symbols (which will be relocated, at load time, against `malloc`).

In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
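A minimal standalone sketch of the `-Wl,-wrap` mechanism itself (illustrative, not Chromium code):

```cpp
// Build with: g++ wrap.cc -Wl,-wrap,malloc -o wrap
#include <stdio.h>
#include <stdlib.h>

// With -Wl,-wrap,malloc the linker resolves every reference to malloc in the
// linked objects as __wrap_malloc, and makes the original definition
// reachable as __real_malloc.
extern "C" void* __real_malloc(size_t size);

extern "C" void* __wrap_malloc(size_t size) {
  // Chromium's shim would dispatch through its layer here.
  fprintf(stderr, "intercepted malloc(%zu)\n", size);
  return __real_malloc(size);
}

int main() {
  void* p = malloc(64);  // The linker rewrote this call to __wrap_malloc.
  free(p);
  return 0;
}
```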

**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
 - A singly linked list of dispatchers (structs with function pointers to
   `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
   (using the `InsertAllocatorDispatch` API). They can intercept and override
   allocator calls.
 - The security checks (suicide on malloc-failure via `std::new_handler`, etc.).
   This happens inside `allocator_shim.cc`.
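A simplified sketch of such a dispatcher chain is below. The real `AllocatorDispatch` in `allocator_shim.h` carries many more entry points (realloc, batch, and aligned variants); this only shows the chaining idea:

```cpp
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

// Simplified stand-in for the shim's AllocatorDispatch struct.
struct Dispatch {
  void* (*alloc)(const Dispatch* self, size_t size);
  void (*free_fn)(const Dispatch* self, void* ptr);
  const Dispatch* next;  // Next dispatcher; the tail hits the real allocator.
};

// Tail of the chain: routes to the actual allocator.
void* TailAlloc(const Dispatch*, size_t size) { return malloc(size); }
void TailFree(const Dispatch*, void* ptr) { free(ptr); }
const Dispatch g_tail = {&TailAlloc, &TailFree, nullptr};

// An interposed dispatcher (e.g. a profiler hook), which the real shim would
// insert at runtime via InsertAllocatorDispatch().
void* LoggingAlloc(const Dispatch* self, size_t size) {
  void* ptr = self->next->alloc(self->next, size);  // Delegate down the chain.
  fprintf(stderr, "alloc(%zu) -> %p\n", size, ptr);
  return ptr;
}
void LoggingFree(const Dispatch* self, void* ptr) {
  self->next->free_fn(self->next, ptr);
}
const Dispatch g_logging = {&LoggingAlloc, &LoggingFree, &g_tail};

int main() {
  const Dispatch* head = &g_logging;  // Head of the chain.
  void* p = head->alloc(head, 128);
  head->free_fn(head, p);
  return 0;
}
```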

**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.


Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)

[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
src/base/allocator/allocator.gni (Normal file, 150 lines)
@@ -0,0 +1,150 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//build/config/chromecast_build.gni")
import("//build/config/sanitizers/sanitizers.gni")

if (is_ios) {
  import("//build/config/ios/ios_sdk.gni")
}

# Sanitizers replace the allocator, don't use our own.
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan

# - Component build support is disabled on all platforms. It is known to cause
#   issues on some (e.g. Windows with shims, Android with non-universal symbol
#   wrapping), and has not been validated on others.
# - Windows: debug CRT is not compatible, see below.
_disable_partition_alloc = is_component_build || (is_win && is_debug)

# - NaCl: No plans to support it.
# - iOS: not done yet.
_is_partition_alloc_platform = !is_nacl && !is_ios

# Under Windows Debug the allocator shim is not compatible with CRT.
# NaCl in particular does seem to link some binaries statically
# against the debug CRT with "is_nacl=false".
# Under Fuchsia the allocator shim is only required for PA-E.
# For all other platforms & configurations, the shim is required, to replace
# the default system allocators, e.g. with Partition Alloc.
if ((is_linux || is_chromeos || is_android || is_apple ||
     (is_fuchsia && !_disable_partition_alloc) ||
     (is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
  _default_use_allocator_shim = true
} else {
  _default_use_allocator_shim = false
}

if (_default_use_allocator_shim && _is_partition_alloc_platform &&
    !_disable_partition_alloc) {
  _default_allocator = "partition"
} else {
  _default_allocator = "none"
}

declare_args() {
  # Memory allocator to use. Set to "none" to use default allocator.
  use_allocator = _default_allocator

  # Causes all the allocations to be routed via allocator_shim.cc.
  use_allocator_shim = _default_use_allocator_shim

  # Whether PartitionAlloc should be available for use or not.
  # true makes PartitionAlloc linked to the executable or shared library and
  # makes it available for use. It doesn't mean that the default allocator
  # is PartitionAlloc, which is governed by |use_allocator|.
  #
  # This flag is currently set to false only on Cronet bots, because Cronet
  # doesn't use PartitionAlloc at all, and doesn't wish to incur the library
  # size increase (crbug.com/674570).
  use_partition_alloc = true
}

if (!use_partition_alloc && use_allocator == "partition") {
  # If there is a conflict, prioritize |use_partition_alloc| over
  # |use_allocator|.
  use_allocator = "none"
}

assert(use_allocator == "none" || use_allocator == "partition")

assert(
    !use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
        is_fuchsia || is_apple,
    "use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
        "and Windows.")

if (is_win && use_allocator_shim) {
  # TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
  assert(!is_component_build,
         "The allocator shim doesn't work for the component build on Windows.")
}

_is_brp_supported = (is_win || is_android) && use_allocator == "partition"

declare_args() {
  # Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
  # of raw_ptr<T>, and enable PartitionAlloc support for it.
  use_backup_ref_ptr = _is_brp_supported

  use_mte_checked_ptr = false
}

assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
       "MTECheckedPtr conflicts with BRP.")

declare_args() {
  # If BRP is enabled, additional options are available:
  # - put_ref_count_in_previous_slot: place the ref-count at the end of the
  #   previous slot (or in metadata if a slot starts on the page boundary), as
  #   opposed to the beginning of the slot.
  # - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
  #   are too expensive to have on by default.
  # - enable_dangling_raw_ptr_checks: enable checking raw_ptr do not become
  #   dangling during their lifetime.
  put_ref_count_in_previous_slot = use_backup_ref_ptr
  enable_backup_ref_ptr_slow_checks = false
  enable_dangling_raw_ptr_checks = false

  # Registers the binary for a fake binary A/B experiment. The binaries built
  # with this flag have no behavior difference, except for setting a synthetic
  # Finch.
  use_fake_binary_experiment = false

  use_asan_backup_ref_ptr = false
}

# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
# In theory, such a configuration is possible, but its scope would be limited to
# only Blink partitions, which is currently not tested. Better to trigger an
# error, than have BackupRefPtr silently disabled while believing it is enabled.
if (!is_nacl) {
  assert(!use_backup_ref_ptr || use_allocator == "partition",
         "Can't use BackupRefPtr without PartitionAlloc-Everywhere")
}

# put_ref_count_in_previous_slot can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !put_ref_count_in_previous_slot,
    "Can't put ref count in the previous slot if BackupRefPtr isn't enabled at all")

# enable_backup_ref_ptr_slow_checks can only be used if use_backup_ref_ptr
# is true.
assert(use_backup_ref_ptr || !enable_backup_ref_ptr_slow_checks,
       "Can't enable additional BackupRefPtr checks if it isn't enabled at all")

# enable_dangling_raw_ptr_checks can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !enable_dangling_raw_ptr_checks,
    "Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")

# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
assert(
    !use_backup_ref_ptr || !use_asan_backup_ref_ptr,
    "Both BackupRefPtr and AsanBackupRefPtr can't be enabled at the same time")

assert(!use_asan_backup_ref_ptr || is_asan,
       "AsanBackupRefPtr requires AddressSanitizer")
src/base/allocator/allocator_check.cc (Normal file, 40 lines)
@@ -0,0 +1,40 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_check.h"

#include "base/allocator/buildflags.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include "base/allocator/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/allocator/allocator_interception_mac.h"
#endif

namespace base {
namespace allocator {

bool IsAllocatorInitialized() {
#if BUILDFLAG(IS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
  // Set by allocator_shim_override_ucrt_symbols_win.h when the
  // shimmed _set_new_mode() is called.
  return g_is_win_shim_layer_initialized;
#elif BUILDFLAG(IS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
    !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // From allocator_interception_mac.mm.
  return base::allocator::g_replaced_default_zone;
#else
  return true;
#endif
}

}  // namespace allocator
}  // namespace base
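A hypothetical call-site sketch (the function name is invented for illustration): the check is meant to run early at process startup to assert that the shim or default-zone replacement actually took effect:

```cpp
#include "base/allocator/allocator_check.h"
#include "base/check.h"

// Hypothetical early-startup hook; crashes fast if the allocator shim /
// default-zone replacement did not take effect.
void EnsureAllocatorShimIsActive() {
  CHECK(base::allocator::IsAllocatorInitialized());
}
```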
src/base/allocator/allocator_check.h (Normal file, 18 lines)
@@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

#include "base/base_export.h"

namespace base {
namespace allocator {

BASE_EXPORT bool IsAllocatorInitialized();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
15  src/base/allocator/allocator_extension.cc  Normal file
@@ -0,0 +1,15 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"

namespace base {
namespace allocator {

void ReleaseFreeMemory() {}

}  // namespace allocator
}  // namespace base
23  src/base/allocator/allocator_extension.h  Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

#include <stddef.h>  // for size_t

#include "base/base_export.h"
#include "build/build_config.h"

namespace base {
namespace allocator {

// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
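A hypothetical call site: a hint to the allocator under memory pressure. In
this import ReleaseFreeMemory() is a no-op (see the .cc above), so calling it
unconditionally is safe; OnMemoryPressure is an illustrative name.

#include "base/allocator/allocator_extension.h"

void OnMemoryPressure() {
  // Ask the allocator to return free pages to the OS, where supported.
  base::allocator::ReleaseFreeMemory();
}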
65  src/base/allocator/allocator_interception_mac.h  Normal file
@@ -0,0 +1,65 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_

#include <stddef.h>

#include "base/base_export.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

struct MallocZoneFunctions;

// This initializes AllocatorDispatch::default_dispatch by saving pointers to
// the functions in the current default malloc zone. This must be called before
// the default malloc zone is changed to have its intended effect.
void InitializeDefaultDispatchToMacAllocator();

// Saves the function pointers currently used by the default zone.
void StoreFunctionsForDefaultZone();

// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
void StoreFunctionsForAllZones();

// For all malloc zones that have been stored, replace their functions with
// |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);

extern bool g_replaced_default_zone;

// Calls the original implementation of malloc/calloc prior to interception.
bool UncheckedMallocMac(size_t size, void** result);
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);

// Intercepts calls to default and purgeable malloc zones. Intercepts Core
// Foundation and Objective-C allocations.
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();

// Updates all malloc zones to use their original functions.
// Also calls ClearAllMallocZonesForTesting.
BASE_EXPORT void UninterceptMallocZonesForTesting();

// Returns true if allocations are successfully being intercepted for all
// malloc zones.
bool AreMallocZonesIntercepted();

// Periodically checks for, and shims new malloc zones. Stops checking after 1
// minute.
BASE_EXPORT void PeriodicallyShimNewMallocZones();

// Exposed for testing.
BASE_EXPORT void ShimNewMallocZones();
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
                                      const MallocZoneFunctions* functions);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
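An illustrative sketch (not part of the import): UncheckedMallocMac() reports
failure through its return value instead of triggering the OOM killer, so a
caller can degrade gracefully. TryAllocateScratch is a hypothetical name.

#include <stddef.h>

bool TryAllocateScratch(size_t size, void** out) {
  void* buffer = nullptr;
  if (!base::allocator::UncheckedMallocMac(size, &buffer))
    return false;  // Allocation failed; no process termination occurred.
  *out = buffer;
  return true;
}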
613  src/base/allocator/allocator_interception_mac.mm  Normal file
@@ -0,0 +1,613 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains all the logic necessary to intercept allocations on
// macOS. "malloc zones" are an abstraction that allows the process to
// intercept all malloc-related functions. There is no good mechanism [short
// of interposition] to determine when new malloc zones are added, so there's
// no clean mechanism to intercept all malloc zones. This file contains logic
// to intercept the default and purgeable zones, which always exist. A cursory
// review of Chrome seems to imply that non-default zones are almost never
// used.
//
// This file also contains logic to intercept Core Foundation and Objective-C
// allocations. The implementations forward to the default malloc zone, so the
// only reason to intercept these calls is to re-label OOM crashes with
// slightly more details.

#include "base/allocator/allocator_interception_mac.h"

#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <errno.h>
#include <mach/mach.h>
#import <objc/runtime.h>
#include <stddef.h>

#include <new>

#include "base/allocator/buildflags.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "base/bind.h"
#include "base/bits.h"
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/process/memory.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"

#if BUILDFLAG(IS_IOS)
#include "base/ios/ios_util.h"
#else
#include "base/mac/mac_util.h"
#endif

namespace base {
namespace allocator {

bool g_replaced_default_zone = false;

namespace {

bool g_oom_killer_enabled;
bool g_allocator_shims_failed_to_install;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
//
// Returns true if the malloc zone was properly de-protected, or false
// otherwise. If this function returns false, the out parameters are invalid
// and the region does not need to be re-protected.
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
                         vm_address_t* reprotection_start,
                         vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
                   VM_REGION_BASIC_INFO_64,
                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
  if (result != KERN_SUCCESS) {
    MACH_LOG(ERROR, result) << "vm_region_64";
    return false;
  }

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See
  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
  mach_port_deallocate(mach_task_self(), unused);

  if (!(info.max_protection & VM_PROT_WRITE)) {
    LOG(ERROR) << "Invalid max_protection " << info.max_protection;
    return false;
  }

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 10 malloc zone
  // rather than the actual smaller size if the passed-in zone is not
  // version 10.
  DCHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
                          reinterpret_cast<vm_address_t>(*reprotection_start);
  DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result =
        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
                   false, info.protection | VM_PROT_WRITE);
    if (result != KERN_SUCCESS) {
      MACH_LOG(ERROR, result) << "vm_protect";
      return false;
    }
  }
  return true;
}

#if !defined(ADDRESS_SANITIZER)

MallocZoneFunctions g_old_zone;
MallocZoneFunctions g_old_purgeable_zone;

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  void* result = g_old_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

#if !defined(ADDRESS_SANITIZER)

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
  return !base::ios::IsRunningOnOrLater(16, 0, 0);
#else
  return !base::mac::IsOSLaterThan12_DontCallThis();
#endif
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    TerminateBecauseOutOfMemory(0);
  return result;
}

void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
  if (!IsMallocZoneAlreadyStored(chrome_zone))
    return;
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  ReplaceZoneFunctions(chrome_zone, &functions);
}

}  // namespace

bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  if (g_old_zone.malloc) {
    *result = g_old_zone.malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  if (g_old_zone.calloc) {
    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}

void StoreFunctionsForDefaultZone() {
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  StoreMallocZone(default_zone);
}

void StoreFunctionsForAllZones() {
  // This ensures that the default zone is always at the front of the array,
  // which is important for performance.
  StoreFunctionsForDefaultZone();

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    StoreMallocZone(zone);
  }
}

void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  g_replaced_default_zone = true;
}

void InterceptAllocationsMac() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see malloc_zone_malloc()
  // in https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
  // for details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // The malloc zone backed by PartitionAlloc crashes by default, so there is
  // no need to install the OOM killer.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;

    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());
  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }

  ClearAllMallocZonesForTesting();
}

bool AreMallocZonesIntercepted() {
  return !g_allocator_shims_failed_to_install;
}

namespace {

void ShimNewMallocZonesAndReschedule(base::Time end_time,
                                     base::TimeDelta delay) {
  ShimNewMallocZones();

  if (base::Time::Now() > end_time)
    return;

  base::TimeDelta next_delay = delay * 2;
  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
      delay);
}

}  // namespace

void PeriodicallyShimNewMallocZones() {
  base::Time end_time = base::Time::Now() + base::Minutes(1);
  base::TimeDelta initial_delay = base::Seconds(1);
  ShimNewMallocZonesAndReschedule(end_time, initial_delay);
}

void ShimNewMallocZones() {
  StoreFunctionsForAllZones();

  // Use the functions for the default zone as a template to replace those in
  // new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  DCHECK(IsMallocZoneAlreadyStored(default_zone));

  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}

void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  bool success =
      DeprotectMallocZone(zone, &reprotection_start, &reprotection_length,
                          &reprotection_value);
  if (!success) {
    g_allocator_shims_failed_to_install = true;
    return;
  }

  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  if (functions->batch_malloc)
    zone->batch_malloc = functions->batch_malloc;
  if (functions->batch_free)
    zone->batch_free = functions->batch_free;
  if (functions->size)
    zone->size = functions->size;
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }

  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}

}  // namespace allocator
}  // namespace base
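A hypothetical startup sequence tying the entry points above together. The two
called functions are real declarations from this file; the wrapper name is
illustrative, and PeriodicallyShimNewMallocZones() assumes a sequenced task
runner is available on the calling thread.

void InstallMacOomKiller() {
  // Wrap the default and purgeable zones, plus CF/NSObject allocation.
  base::allocator::InterceptAllocationsMac();
  // Pick up zones registered shortly after startup; rechecks for one minute.
  base::allocator::PeriodicallyShimNewMallocZones();
}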
418  src/base/allocator/allocator_shim.cc  Normal file
@@ -0,0 +1,418 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <new>

#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.

namespace {

std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
    &base::allocator::AllocatorDispatch::default_dispatch};

bool g_call_new_handler_on_malloc_failure = false;

ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled
  // and we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}

ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(value);
#endif
}

void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void UncheckedFree(void* ptr) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, ptr, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if succeeded, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true).
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just suicide printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the
//       alloc.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      !base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}

ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = base::bits::AlignUp(size, GetCachedPageSize());
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"

#if !BUILDFLAG(IS_WIN) && \
    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS (except for PartitionAlloc-Everywhere) where
// the malloc intercept is deep enough that it also catches the cpp calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid
// crashes in case of operator new() noexcept. Thus, operator new() noexcept
// needs to be routed to base::internal::PartitionMallocUnchecked through the
// shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that
// both the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN
// uses this mechanism.

#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if BUILDFLAG(IS_APPLE)
namespace base {
namespace allocator {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif
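A standalone sketch of the retry loop that the "general pattern for
allocations" comment above describes, written against plain libc/std names
rather than the dispatch chain; AllocWithNewHandlerRetry is a hypothetical
name, not a symbol from this file.

#include <cstdlib>
#include <new>

void* AllocWithNewHandlerRetry(size_t size) {
  for (;;) {
    void* ptr = std::malloc(size);
    if (ptr)
      return ptr;
    std::new_handler handler = std::get_new_handler();
    if (!handler)
      return nullptr;  // No handler installed: report failure to the caller.
    handler();  // Assume it frees memory and returns, or aborts the process.
  }
}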
200  src/base/allocator/allocator_shim.h  Normal file
@@ -0,0 +1,200 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_

#include <stddef.h>
#include <stdint.h>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/types/strong_alias.h"
#include "build/build_config.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_ALLOW_PCSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif

namespace base {
namespace allocator {

// Allocator Shim API. Allows to:
//  - Configure the behavior of the allocator (what to do on OOM failures).
//  - Install new hooks (AllocatorDispatch) in the allocator chain.

// When this shim layer is enabled, the route of an allocation is as follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
//   The override_* headers define the symbols required to intercept calls to
//   malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Routing allocation calls to the shim:
//   The headers above route the calls to the internal ShimMalloc(),
//   ShimFree(), ShimCppNew() etc. methods defined in allocator_shim.cc.
//   These methods will: (1) forward the allocation call to the front of the
//   AllocatorDispatch chain. (2) perform security hardenings (e.g., might
//   call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
//   It is a singly linked list where each element is a struct with function
//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
//   consists of a single AllocatorDispatch element, herein called
//   the "default dispatch", which is statically defined at build time and
//   ultimately routes the calls to the actual allocator defined by the build
//   config (glibc, ...).
//
// It is possible to dynamically insert further AllocatorDispatch stages
// to the front of the chain, for debugging / profiling purposes.
//
// All the functions must be thread safe. The shim does not enforce any
// serialization. This is to route to thread-aware allocators without
// introducing unnecessary perf hits.

struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self,
                        size_t size,
                        void* context);
  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
                                 size_t size,
                                 void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                       size_t n,
                                       size_t size,
                                       void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self,
                               size_t alignment,
                               size_t size,
                               void* context);
  using ReallocFn = void*(const AllocatorDispatch* self,
                          void* address,
                          size_t size,
                          void* context);
  using FreeFn = void(const AllocatorDispatch* self,
                      void* address,
                      void* context);
  // Returns the allocated size of user data (not including heap overhead).
  // Can be larger than the requested size.
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address,
                                   void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
                                 size_t size,
                                 void** results,
                                 unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self,
                           void** to_be_freed,
                           unsigned num_to_be_freed,
                           void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
                                  void* ptr,
                                  size_t size,
                                  void* context);
  using AlignedMallocFn = void*(const AllocatorDispatch* self,
                                size_t size,
                                size_t alignment,
                                void* context);
  using AlignedReallocFn = void*(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 size_t alignment,
                                 void* context);
  using AlignedFreeFn = void(const AllocatorDispatch* self,
                             void* address,
                             void* context);

  AllocFn* const alloc_function;
  AllocUncheckedFn* const alloc_unchecked_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  // batch_malloc, batch_free, and free_definite_size are specific to the OSX
  // and iOS allocators.
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;
  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
  // Windows allocator.
  AlignedMallocFn* const aligned_malloc_function;
  AlignedReallocFn* const aligned_realloc_function;
  AlignedFreeFn* const aligned_free_function;

  const AllocatorDispatch* next;

  // |default_dispatch| is statically defined by one (and only one) of the
  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
  // configuration.
  static const AllocatorDispatch default_dispatch;
};

// When true makes malloc behave like new, w.r.t calling the new_handler if
// the allocation fails (see set_new_mode() in Windows).
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);

// Allocates |size| bytes or returns nullptr. It does NOT call the
// new_handler, regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);

// Frees memory allocated with UncheckedAlloc().
BASE_EXPORT void UncheckedFree(void* ptr);

// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
// The callers have responsibility for inserting a single dispatch no more
// than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);

// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
// removal of arbitrary elements from a singly linked list would require a
// lock in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_WIN)
|
||||||
|
// Configures the allocator for the caller's allocation domain. Allocations that
|
||||||
|
// take place prior to this configuration step will succeed, but will not
|
||||||
|
// benefit from its one-time mitigations. As such, this function must be called
|
||||||
|
// as early as possible during startup.
|
||||||
|
BASE_EXPORT void ConfigurePartitionAlloc();
|
||||||
|
#endif // BUILDFLAG(IS_WIN)
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
void InitializeDefaultAllocatorPartitionRoot();
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
// On macOS, the allocator shim needs to be turned on during runtime.
|
||||||
|
BASE_EXPORT void InitializeAllocatorShim();
|
||||||
|
#endif // BUILDFLAG(IS_APPLE)
|
||||||
|
|
||||||
|
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
|
||||||
|
|
||||||
|
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
|
||||||
|
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
|
||||||
|
using UseDedicatedAlignedPartition =
|
||||||
|
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
|
||||||
|
using AlternateBucketDistribution =
|
||||||
|
base::StrongAlias<class AlternateBucketDistributionTag, bool>;
|
||||||
|
|
||||||
|
// If |thread_cache_on_non_quarantinable_partition| is specified, the
|
||||||
|
// thread-cache will be enabled on the non-quarantinable partition. The
|
||||||
|
// thread-cache on the main (malloc) partition will be disabled.
|
||||||
|
BASE_EXPORT void ConfigurePartitions(
|
||||||
|
EnableBrp enable_brp,
|
||||||
|
SplitMainPartition split_main_partition,
|
||||||
|
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||||
|
AlternateBucketDistribution use_alternate_bucket_distribution);
|
||||||
|
|
||||||
|
#if defined(PA_ALLOW_PCSCAN)
|
||||||
|
BASE_EXPORT void EnablePCScan(base::internal::PCScan::InitConfig);
|
||||||
|
#endif
|
||||||
|
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||||
|
|
||||||
|
} // namespace allocator
|
||||||
|
} // namespace base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
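A dynamically inserted stage, as described in the header comments above, is simply another AllocatorDispatch whose entries forward to |next|. The sketch below is illustrative and not part of the import: the Counting* names and EnableAllocationCounting() are invented, and it assumes (like the default dispatches later in this commit) that the OS-specific batch/aligned entries may stay null on platforms where they are never invoked.

#include <atomic>
#include <cstddef>

#include "base/allocator/allocator_shim.h"

namespace {

using base::allocator::AllocatorDispatch;

std::atomic<size_t> g_alloc_count{0};

// Counts allocations, then forwards to the next stage in the chain.
void* CountingAlloc(const AllocatorDispatch* self, size_t size, void* context) {
  g_alloc_count.fetch_add(1, std::memory_order_relaxed);
  return self->next->alloc_function(self->next, size, context);
}

// The remaining entries are plain pass-throughs.
void* CountingAllocUnchecked(const AllocatorDispatch* self,
                             size_t size,
                             void* context) {
  return self->next->alloc_unchecked_function(self->next, size, context);
}

void* CountingAllocZeroInitialized(const AllocatorDispatch* self,
                                   size_t n,
                                   size_t size,
                                   void* context) {
  return self->next->alloc_zero_initialized_function(self->next, n, size,
                                                     context);
}

void* CountingAllocAligned(const AllocatorDispatch* self,
                           size_t alignment,
                           size_t size,
                           void* context) {
  return self->next->alloc_aligned_function(self->next, alignment, size,
                                            context);
}

void* CountingRealloc(const AllocatorDispatch* self,
                      void* address,
                      size_t size,
                      void* context) {
  return self->next->realloc_function(self->next, address, size, context);
}

void CountingFree(const AllocatorDispatch* self, void* address, void* context) {
  self->next->free_function(self->next, address, context);
}

size_t CountingGetSizeEstimate(const AllocatorDispatch* self,
                               void* address,
                               void* context) {
  return self->next->get_size_estimate_function(self->next, address, context);
}

AllocatorDispatch g_counting_dispatch = {
    &CountingAlloc,                /* alloc_function */
    &CountingAllocUnchecked,       /* alloc_unchecked_function */
    &CountingAllocZeroInitialized, /* alloc_zero_initialized_function */
    &CountingAllocAligned,         /* alloc_aligned_function */
    &CountingRealloc,              /* realloc_function */
    &CountingFree,                 /* free_function */
    &CountingGetSizeEstimate,      /* get_size_estimate_function */
    nullptr,                       /* batch_malloc_function */
    nullptr,                       /* batch_free_function */
    nullptr,                       /* free_definite_size_function */
    nullptr,                       /* aligned_malloc_function */
    nullptr,                       /* aligned_realloc_function */
    nullptr,                       /* aligned_free_function */
    nullptr,                       /* next, set by InsertAllocatorDispatch() */
};

}  // namespace

// Hypothetical entry point; call once, early, from a single thread.
void EnableAllocationCounting() {
  base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);
}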
122
src/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
Normal file
@ -0,0 +1,122 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "base/allocator/allocator_shim.h"
#include "base/compiler_specific.h"
#include "base/numerics/checked_math.h"
#include "base/process/memory.h"

#include <dlfcn.h>
#include <malloc.h>

// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to libc functions.
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.

extern "C" {
void* __libc_malloc(size_t size);
void* __libc_calloc(size_t n, size_t size);
void* __libc_realloc(void* address, size_t size);
void* __libc_memalign(size_t alignment, size_t size);
void __libc_free(void* ptr);
}  // extern "C"

namespace {

using base::allocator::AllocatorDispatch;

// Strictly speaking, it would make more sense to not subtract anything, but
// other shims limit to something lower than INT_MAX (which is 0x7FFFFFFF on
// most platforms), and tests expect that.
constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);

void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
  // Cannot force glibc's malloc() to crash when a large size is requested, do
  // it in the shim instead.
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_malloc(size);
}

void* GlibcUncheckedMalloc(const AllocatorDispatch*,
                           size_t size,
                           void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    return nullptr;

  return __libc_malloc(size);
}

void* GlibcCalloc(const AllocatorDispatch*,
                  size_t n,
                  size_t size,
                  void* context) {
  const auto total = base::CheckMul(n, size);
  if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size * n);

  return __libc_calloc(n, size);
}

void* GlibcRealloc(const AllocatorDispatch*,
                   void* address,
                   size_t size,
                   void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_realloc(address, size);
}

void* GlibcMemalign(const AllocatorDispatch*,
                    size_t alignment,
                    size_t size,
                    void* context) {
  if (UNLIKELY(size >= kMaxAllowedSize))
    base::TerminateBecauseOutOfMemory(size);

  return __libc_memalign(alignment, size);
}

void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
  __libc_free(address);
}

NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
  // resolve it instead. This should be safe because glibc (and hence dlfcn)
  // does not use malloc_size internally and so there should not be a risk of
  // recursion.
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  static MallocUsableSizeFunction fn_ptr =
      reinterpret_cast<MallocUsableSizeFunction>(
          dlsym(RTLD_NEXT, "malloc_usable_size"));

  return fn_ptr(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &GlibcMalloc,          /* alloc_function */
    &GlibcUncheckedMalloc, /* alloc_unchecked_function */
    &GlibcCalloc,          /* alloc_zero_initialized_function */
    &GlibcMemalign,        /* alloc_aligned_function */
    &GlibcRealloc,         /* realloc_function */
    &GlibcFree,            /* free_function */
    &GlibcGetSizeEstimate, /* get_size_estimate_function */
    nullptr,               /* batch_malloc_function */
    nullptr,               /* batch_free_function */
    nullptr,               /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};
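GlibcGetSizeEstimate() above resolves malloc_usable_size through dlsym(RTLD_NEXT, ...) rather than calling it directly, so an interposing shim does not recurse into itself. A minimal standalone sketch of the same pattern, not part of the import; it assumes a glibc target, and main() with its output is purely illustrative:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for RTLD_NEXT (g++ usually predefines this)
#endif
#include <dlfcn.h>
#include <malloc.h>

#include <cstdio>
#include <cstdlib>

int main() {
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  // RTLD_NEXT skips the current object and finds the next definition in
  // search order, which is exactly what an interposer needs.
  auto fn = reinterpret_cast<MallocUsableSizeFunction>(
      dlsym(RTLD_NEXT, "malloc_usable_size"));
  if (!fn)
    return 1;
  void* p = malloc(100);
  // Typically prints a value >= 100: glibc rounds requests up to a bin size.
  printf("usable size: %zu\n", fn(p));
  free(p);
  return 0;
}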
@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <malloc.h>

#include "base/allocator/allocator_shim.h"
#include "build/build_config.h"

// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
// -Wl,-wrap,malloc approach (see README.md).
// The __real_X functions here are special symbols that the linker will relocate
// against the real "X" undefined symbol, so that __real_malloc becomes the
// equivalent of what an undefined malloc symbol reference would have been.
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
// which routes the __wrap_X functions into the shim.

extern "C" {
void* __real_malloc(size_t);
void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
void __real_free(void*);
}  // extern "C"

namespace {

using base::allocator::AllocatorDispatch;

void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __real_malloc(size);
}

void* RealCalloc(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  return __real_calloc(n, size);
}

void* RealRealloc(const AllocatorDispatch*,
                  void* address,
                  size_t size,
                  void* context) {
  return __real_realloc(address, size);
}

void* RealMemalign(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  return __real_memalign(alignment, size);
}

void RealFree(const AllocatorDispatch*, void* address, void* context) {
  __real_free(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &RealMalloc,   /* alloc_function */
    &RealMalloc,   /* alloc_unchecked_function */
    &RealCalloc,   /* alloc_zero_initialized_function */
    &RealMemalign, /* alloc_aligned_function */
    &RealRealloc,  /* realloc_function */
    &RealFree,     /* free_function */
    nullptr,       /* get_size_estimate_function */
    nullptr,       /* batch_malloc_function */
    nullptr,       /* batch_free_function */
    nullptr,       /* free_definite_size_function */
    nullptr,       /* aligned_malloc_function */
    nullptr,       /* aligned_realloc_function */
    nullptr,       /* aligned_free_function */
    nullptr,       /* next */
};
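The __real_/__wrap_ mechanics the comments above describe come from GNU ld's --wrap option. A freestanding sketch of that linker feature, not part of the import, assuming a GNU toolchain (build with: g++ demo.cc -Wl,--wrap=malloc):

// With --wrap=malloc, every undefined reference to malloc is redirected to
// __wrap_malloc, and __real_malloc resolves to the original malloc.
#include <cstdio>
#include <cstdlib>

extern "C" {
void* __real_malloc(size_t size);

void* __wrap_malloc(size_t size) {
  fprintf(stderr, "malloc(%zu)\n", size);
  return __real_malloc(size);
}
}  // extern "C"

int main() {
  void* p = malloc(32);  // the linker routes this call through __wrap_malloc
  free(p);
  return 0;
}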
@ -0,0 +1,107 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/malloc_zone_functions_mac.h"

namespace base {
namespace allocator {
namespace {

void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                          size);
}

void* CallocImpl(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
                          size);
}

void* MemalignImpl(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
                            alignment, size);
}

void* ReallocImpl(const AllocatorDispatch*,
                  void* ptr,
                  size_t size,
                  void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                           ptr, size);
}

void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

unsigned BatchMallocImpl(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.batch_malloc(
      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
      num_requested);
}

void BatchFreeImpl(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
                       to_be_freed, num_to_be_freed);
}

void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free_definite_size(
      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &MallocImpl,           /* alloc_function */
    &MallocImpl,           /* alloc_unchecked_function */
    &CallocImpl,           /* alloc_zero_initialized_function */
    &MemalignImpl,         /* alloc_aligned_function */
    &ReallocImpl,          /* realloc_function */
    &FreeImpl,             /* free_function */
    &GetSizeEstimateImpl,  /* get_size_estimate_function */
    &BatchMallocImpl,      /* batch_malloc_function */
    &BatchFreeImpl,        /* batch_free_function */
    &FreeDefiniteSizeImpl, /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};

}  // namespace allocator
}  // namespace base
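Every function above receives the zone as its |context| and forwards to that zone's original function table. The underlying malloc-zone API is Apple's; a minimal sketch of it outside the shim, not part of the import (Apple platforms only; the printed values are illustrative):

#include <malloc/malloc.h>

#include <cstdio>

int main() {
  malloc_zone_t* zone = malloc_default_zone();
  void* p = malloc_zone_malloc(zone, 64);
  // The system locates an allocation's owning zone via malloc_zone_from_ptr();
  // a zone claims ownership by returning a nonzero size for the pointer.
  printf("owning zone: %p, size: %zu\n",
         static_cast<void*>(malloc_zone_from_ptr(p)), malloc_size(p));
  malloc_zone_free(zone, p);
  return 0;
}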
@ -0,0 +1,794 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <cstddef>
#include <map>
#include <string>
#include <tuple>

#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <malloc.h>
#endif

#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#include <windows.h>
#endif

using base::allocator::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop, relaxed ordering for failure since
    // in this case we don't imply any ordering.
    //
    // This matches partition_allocator/spinning_mutex.h fast path on Linux.
    while (!lock_.compare_exchange_weak(
        expected, true, std::memory_order_acquire, std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (LIKELY(instance))
      return instance;

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  // Before C++20, having an initializer here causes a "variable does not have a
  // constant initializer" error. In C++20, omitting it causes a similar error.
  // Presumably this is due to the C++20 changes to make atomic initialization
  // (of the other members of this class) sane, so guarding under that
  // feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
    __cpp_lib_atomic_value_initialization < 201911L
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
  std::atomic<bool> initialization_lock_;
};

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set, the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (instance)
    return instance;

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}

class MainPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Additional partitions may be created in ConfigurePartitions(). Since
        // only one partition can have thread cache enabled, postpone the
        // decision to turn the thread cache on until after that call.
        // TODO(bartekn): Enable it here by default, once the "split-only" mode
        // is no longer needed.
        base::PartitionOptions::ThreadCache::kDisabled;
#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Other tests, such as the ThreadCache tests create a thread cache,
        // and only one is supported at a time.
        base::PartitionOptions::ThreadCache::kDisabled;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
        base::PartitionOptions::AlignedAlloc::kAllowed,
        thread_cache,
        base::PartitionOptions::Quarantine::kAllowed,
        base::PartitionOptions::Cookie::kAllowed,
        base::PartitionOptions::BackupRefPtr::kDisabled,
        base::PartitionOptions::UseConfigurablePool::kNo,
    });

    return new_root;
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               MainPartitionConstructor>
    g_root CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::ThreadSafePartitionRoot*> g_original_root(nullptr);

class AlignedPartitionConstructor {
 public:
  static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
    return g_root.Get();
  }
};

LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
               AlignedPartitionConstructor>
    g_aligned_root CONSTINIT = {};

partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

partition_alloc::ThreadSafePartitionRoot* AlignedAllocator() {
  return g_aligned_root.Get();
}

#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool IsRunning32bitEmulatedOnArm64() {
  using IsWow64Process2Function = decltype(&IsWow64Process2);

  IsWow64Process2Function is_wow64_process2 =
      reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
          ::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
  if (!is_wow64_process2)
    return false;
  USHORT process_machine;
  USHORT native_machine;
  bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
                                  &native_machine);
  if (!retval)
    return false;
  if (native_machine == IMAGE_FILE_MACHINE_ARM64)
    return true;
  return false;
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// The number of bytes to add to every allocation. Ordinarily zero, but set to 8
// when emulating an x86 on ARM64 to avoid a bug in the Windows x86 emulator.
size_t g_extra_bytes;
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)

// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
  return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else   // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
  return size;
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two. So
  // we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is called
  // with a small alignment. Some may be due to overly-careful code, some are
  // because the client code doesn't know the required alignment at compile
  // time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of
  // mismatch. (see below the default_dispatch definition).
  if (alignment <= partition_alloc::internal::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never fire.
    PA_CHECK(base::bits::IsPowerOfTwo(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocWithFlagsNoHooks(
        0, size, partition_alloc::PartitionPageSize());
  }

  return AlignedAllocator()->AlignedAllocWithFlags(
      partition_alloc::AllocFlags::kNoHooks, alignment, size);
}

}  // namespace

namespace base {
namespace internal {

namespace {
#if BUILDFLAG(IS_APPLE)
int g_alloc_flags = 0;
#else
constexpr int g_alloc_flags = 0;
#endif
}  // namespace

void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
#if BUILDFLAG(IS_APPLE)
  // We generally prefer to always crash rather than returning nullptr for
  // OOM. However, on some macOS releases, we have to locally allow it due to
  // weirdness in OS code. See https://crbug.com/654695 for details.
  //
  // Apple only since it's not needed elsewhere, and there is a performance
  // penalty.

  if (value)
    g_alloc_flags = 0;
  else
    g_alloc_flags = partition_alloc::AllocFlags::kReturnNull;
#endif
}

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      0 | g_alloc_flags, MaybeAdjustSize(size),
      partition_alloc::PartitionPageSize());
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  ScopedDisallowAllocations guard{};
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
      MaybeAdjustSize(size), partition_alloc::PartitionPageSize());
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  ScopedDisallowAllocations guard{};
  const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
  return Allocator()->AllocWithFlagsNoHooks(
      partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
      partition_alloc::PartitionPageSize());
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  ScopedDisallowAllocations guard{};
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocWithFlags
// and use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  ScopedDisallowAllocations guard{};
  void* new_ptr = nullptr;
  if (size > 0) {
    size = MaybeAdjustSize(size);
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address)
      partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr)
    return nullptr;
  // TODO(tasak): Need to compare the new alignment with the address' alignment.
  // If the two alignments are not the same, need to return nullptr with EINVAL.
  if (address) {
    size_t usage =
        partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  return new_ptr;
}

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(address)) &&
               address)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `realloc` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return realloc(address, size);
  }
#endif  // BUILDFLAG(IS_APPLE)

  return Allocator()->ReallocWithFlags(
      partition_alloc::AllocFlags::kNoHooks | g_alloc_flags, address,
      MaybeAdjustSize(size), "");
}

#if BUILDFLAG(IS_CAST_ANDROID)
extern "C" {
void __real_free(void*);
}  // extern "C"
#endif  // BUILDFLAG(IS_CAST_ANDROID)

void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
  ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
  // TODO(bartekn): Add MTE unmasking here (and below).
  if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(object)) &&
               object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free` which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return free(object);
  }
#endif  // BUILDFLAG(IS_APPLE)

  // On Android Chromecast devices, there is at least one case where a system
  // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own
  // the pointer, pass it along. This should not have a runtime cost vs regular
  // Android, since on Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(IS_CAST_ANDROID)
  if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
                   reinterpret_cast<uintptr_t>(object)) &&
               object)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free()`, which is `__real_free()`
    // here.
    return __real_free(object);
  }
#endif  // BUILDFLAG(IS_CAST_ANDROID)

  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(object);
}

#if BUILDFLAG(IS_APPLE)
// Normal free() path on Apple OSes:
// 1. size = GetSizeEstimate(ptr);
// 2. if (size) FreeDefiniteSize(ptr, size)
//
// So we don't need to re-check that the pointer is owned in Free(), and we
// can use the size.
void PartitionFreeDefiniteSize(const AllocatorDispatch*,
                               void* address,
                               size_t size,
                               void* context) {
  ScopedDisallowAllocations guard{};
  // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
  // still useful though, as we avoid double-checking that the address is owned.
  partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address);
}
#endif  // BUILDFLAG(IS_APPLE)

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // This is used to implement malloc_usable_size(3). Per its man page, "if ptr
  // is NULL, 0 is returned".
  if (!address)
    return 0;

#if BUILDFLAG(IS_APPLE)
  if (!partition_alloc::IsManagedByPartitionAlloc(
          reinterpret_cast<uintptr_t>(address))) {
    // The object pointed to by `address` is not allocated by the
    // PartitionAlloc. The return value `0` means that the pointer does not
    // belong to this malloc zone.
    return 0;
  }
#endif  // BUILDFLAG(IS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size =
      partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
#if BUILDFLAG(IS_APPLE)
  // The object pointed to by `address` is allocated by the PartitionAlloc.
  // So, this function must not return zero so that the malloc zone dispatcher
  // finds the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // BUILDFLAG(IS_APPLE)
  return size;
}

unsigned PartitionBatchMalloc(const AllocatorDispatch*,
                              size_t size,
                              void** results,
                              unsigned num_requested,
                              void* context) {
  // No real batching: we could only acquire the lock once for instance, keep it
  // simple for now.
  for (unsigned i = 0; i < num_requested; i++) {
    // No need to check the results, we crash if it fails.
    results[i] = PartitionMalloc(nullptr, size, nullptr);
  }

  // Either all succeeded, or we crashed.
  return num_requested;
}

void PartitionBatchFree(const AllocatorDispatch*,
                        void** to_be_freed,
                        unsigned num_to_be_freed,
                        void* context) {
  // No real batching: we could only acquire the lock once for instance, keep it
  // simple for now.
  for (unsigned i = 0; i < num_to_be_freed; i++) {
    PartitionFree(nullptr, to_be_freed[i], nullptr);
  }
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}

}  // namespace internal
}  // namespace base

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace base {
namespace allocator {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() and AlignedAllocator() do not register
  // their PartitionRoots to the memory reclaimer, because doing so may allocate
  // memory. Thus, the registration to the memory reclaimer has to be done
  // some time later, when the main root is fully configured.
  // TODO(bartekn): Aligned allocator can use the regular initialization path.
  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
      Allocator());
  auto* original_root = OriginalAllocator();
  if (original_root)
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        original_root);
  if (AlignedAllocator() != Allocator()) {
    ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
        AlignedAllocator());
  }
}

alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_new_main_partition[sizeof(
        partition_alloc::ThreadSafePartitionRoot)];

alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_aligned_alloc_partition[sizeof(
        partition_alloc::ThreadSafePartitionRoot)];

void ConfigurePartitions(
    EnableBrp enable_brp,
    SplitMainPartition split_main_partition,
    UseDedicatedAlignedPartition use_dedicated_aligned_partition,
    AlternateBucketDistribution use_alternate_bucket_distribution) {
  // BRP cannot be enabled without splitting the main partition. Furthermore, in
  // the "before allocation" mode, it can't be enabled without further splitting
  // out the aligned partition.
  PA_CHECK(!enable_brp || split_main_partition);
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  PA_CHECK(!enable_brp || use_dedicated_aligned_partition);
#endif
  // Can't split out the aligned partition, without splitting the main one.
  PA_CHECK(!use_dedicated_aligned_partition || split_main_partition);

  static bool configured = false;
  PA_CHECK(!configured);
  configured = true;

  // Calling Get() is actually important, even if the return values weren't
  // used, because it has a side effect of initializing the variables, if they
  // weren't already.
  auto* current_root = g_root.Get();
  auto* current_aligned_root = g_aligned_root.Get();

  if (!split_main_partition) {
    if (!use_alternate_bucket_distribution) {
      current_root->SwitchToDenserBucketDistribution();
      current_aligned_root->SwitchToDenserBucketDistribution();
    }
    PA_DCHECK(!enable_brp);
    PA_DCHECK(!use_dedicated_aligned_partition);
    PA_DCHECK(!current_root->flags.with_thread_cache);
    return;
  }

  auto* new_root =
      new (g_allocator_buffer_for_new_main_partition) ThreadSafePartitionRoot({
          !use_dedicated_aligned_partition
              ? base::PartitionOptions::AlignedAlloc::kAllowed
              : base::PartitionOptions::AlignedAlloc::kDisallowed,
          base::PartitionOptions::ThreadCache::kDisabled,
          base::PartitionOptions::Quarantine::kAllowed,
          base::PartitionOptions::Cookie::kAllowed,
          enable_brp ? base::PartitionOptions::BackupRefPtr::kEnabled
                     : base::PartitionOptions::BackupRefPtr::kDisabled,
          base::PartitionOptions::UseConfigurablePool::kNo,
      });

  partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
  if (use_dedicated_aligned_partition) {
    // TODO(bartekn): Use the original root instead of creating a new one. It'd
    // result in one less partition, but come at a cost of commingling types.
    new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
        ThreadSafePartitionRoot({
            base::PartitionOptions::AlignedAlloc::kAllowed,
            base::PartitionOptions::ThreadCache::kDisabled,
            base::PartitionOptions::Quarantine::kAllowed,
            base::PartitionOptions::Cookie::kAllowed,
            base::PartitionOptions::BackupRefPtr::kDisabled,
            base::PartitionOptions::UseConfigurablePool::kNo,
        });
  } else {
    // The new main root can also support AlignedAlloc.
    new_aligned_root = new_root;
  }

  // Now switch traffic to the new partitions.
  g_aligned_root.Replace(new_aligned_root);
  g_root.Replace(new_root);

  // g_original_root has to be set after g_root, because other code doesn't
  // handle well both pointing to the same root.
  // TODO(bartekn): Reorder, once handled well. It isn't ideal for one
  // partition to be invisible temporarily.
  g_original_root = current_root;

  // No need for g_original_aligned_root, because in cases where g_aligned_root
  // is replaced, it must've been g_original_root.
  PA_CHECK(current_aligned_root == g_original_root);

  // Purge memory, now that the traffic to the original partition is cut off.
  current_root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
                            PurgeFlags::kDiscardUnusedSystemPages);

  if (!use_alternate_bucket_distribution) {
    g_root.Get()->SwitchToDenserBucketDistribution();
    g_aligned_root.Get()->SwitchToDenserBucketDistribution();
  }
}

#if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(base::internal::PCScan::InitConfig config) {
  partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
      &::base::PlatformThread::SetName);
  internal::PCScan::Initialize(config);

  internal::PCScan::RegisterScannableRoot(Allocator());
  if (OriginalAllocator() != nullptr)
    internal::PCScan::RegisterScannableRoot(OriginalAllocator());
  if (Allocator() != AlignedAllocator())
    internal::PCScan::RegisterScannableRoot(AlignedAllocator());

  internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
  internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif  // defined(PA_ALLOW_PCSCAN)

#if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup.
void ConfigurePartitionAlloc() {
#if defined(ARCH_CPU_X86)
  if (IsRunning32bitEmulatedOnArm64())
    g_extra_bytes = 8;
#endif  // defined(ARCH_CPU_X86)
}
#endif  // BUILDFLAG(IS_WIN)

}  // namespace allocator
}  // namespace base

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &base::internal::PartitionMalloc,           // alloc_function
    &base::internal::PartitionMallocUnchecked,  // alloc_unchecked_function
    &base::internal::PartitionCalloc,           // alloc_zero_initialized_function
    &base::internal::PartitionMemalign,         // alloc_aligned_function
    &base::internal::PartitionRealloc,          // realloc_function
    &base::internal::PartitionFree,             // free_function
    &base::internal::PartitionGetSizeEstimate,  // get_size_estimate_function
    &base::internal::PartitionBatchMalloc,      // batch_malloc_function
    &base::internal::PartitionBatchFree,        // batch_free_function
#if BUILDFLAG(IS_APPLE)
    // On Apple OSes, free_definite_size() is always called from free(), since
    // get_size_estimate() is used to determine whether an allocation belongs to
    // the current zone. It makes sense to optimize for it.
    &base::internal::PartitionFreeDefiniteSize,
#else
    nullptr,  // free_definite_size_function
#endif
    &base::internal::PartitionAlignedAlloc,    // aligned_malloc_function
    &base::internal::PartitionAlignedRealloc,  // aligned_realloc_function
    &base::internal::PartitionFree,            // aligned_free_function
    nullptr,                                   // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  base::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.

  base::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }

  // Dump stats for nonscannable and nonquarantinable allocators.
  auto& nonscannable_allocator =
      base::internal::NonScannableAllocator::Instance();
  base::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root())
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);
  auto& nonquarantinable_allocator =
      base::internal::NonQuarantinableAllocator::Instance();
  base::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
  if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
    nonquarantinable_root->DumpStats("malloc", true,
                                     &nonquarantinable_allocator_dumper);

  struct mallinfo info = {0};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks = allocator_dumper.stats().total_mmapped_bytes +
               aligned_allocator_dumper.stats().total_mmapped_bytes +
               nonscannable_allocator_dumper.stats().total_mmapped_bytes +
               nonquarantinable_allocator_dumper.stats().total_mmapped_bytes;
  // Resident bytes.
  info.hblkhd = allocator_dumper.stats().total_resident_bytes +
                aligned_allocator_dumper.stats().total_resident_bytes +
                nonscannable_allocator_dumper.stats().total_resident_bytes +
                nonquarantinable_allocator_dumper.stats().total_resident_bytes;
  // Allocated bytes.
  info.uordblks = allocator_dumper.stats().total_active_bytes +
                  aligned_allocator_dumper.stats().total_active_bytes +
                  nonscannable_allocator_dumper.stats().total_active_bytes +
                  nonquarantinable_allocator_dumper.stats().total_active_bytes;

  return info;
}
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)

}  // extern "C"

#if BUILDFLAG(IS_APPLE)

namespace base {
namespace allocator {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid it.
  // Thus, we initialize the PartitionRoot using the system default
  // allocator before we intercept the system default allocator.
  std::ignore = Allocator();
}

}  // namespace allocator
}  // namespace base

#endif  // BUILDFLAG(IS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
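LeakySingleton::GetSlowPath() above spells out the double-checked locking it implements with a spin lock. A compact restatement, not part of the import, with std::mutex standing in for the spin lock (illustrative only; the shim avoids std::mutex and operator new precisely because both may allocate):

#include <atomic>
#include <mutex>

template <typename T>
class DoubleCheckedSingleton {
 public:
  T* Get() {
    // Fast path: the acquire load pairs with the release store below.
    T* instance = instance_.load(std::memory_order_acquire);
    if (instance)
      return instance;

    std::lock_guard<std::mutex> lock(lock_);
    // Re-check under the lock: another thread may have won the race.
    instance = instance_.load(std::memory_order_relaxed);
    if (instance)
      return instance;

    instance = new T();
    instance_.store(instance, std::memory_order_release);
    return instance;
  }

 private:
  std::atomic<T*> instance_{nullptr};
  std::mutex lock_;
};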
@ -0,0 +1,75 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_

#include "base/allocator/allocator_shim.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/base_export.h"

namespace base {
namespace internal {

void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);

class BASE_EXPORT PartitionAllocMalloc {
 public:
  static ThreadSafePartitionRoot* Allocator();
  // May return |nullptr|, will never return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* OriginalAllocator();
  // May return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* AlignedAllocator();
};

BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMallocUnchecked(
    const base::allocator::AllocatorDispatch*,
    size_t size,
    void* context);

BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
                                  size_t n,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
                                    size_t alignment,
                                    size_t size,
                                    void* context);

BASE_EXPORT void* PartitionAlignedAlloc(
    const base::allocator::AllocatorDispatch* dispatch,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionAlignedRealloc(
    const base::allocator::AllocatorDispatch* dispatch,
    void* address,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
                                   void* address,
                                   size_t size,
                                   void* context);

BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
                               void* object,
                               void* context);

BASE_EXPORT size_t
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
                         void* address,
                         void* context);

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
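The accessors declared above are the hook points diagnostics code uses to reach the live partition roots; the mallinfo() shim earlier in this commit does exactly this. A hypothetical helper along the same lines, not part of the import (the name and placement are illustrative):

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/partition_allocator/partition_stats.h"

// Returns the bytes currently live in the main malloc partition, gathered the
// same way the mallinfo() shim gathers them.
size_t CurrentMallocActiveBytes() {
  base::SimplePartitionStatsDumper dumper;
  base::internal::PartitionAllocMalloc::Allocator()->DumpStats("malloc", true,
                                                               &dumper);
  return dumper.stats().total_active_bytes;
}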
106
src/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
Normal file
@ -0,0 +1,106 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <ostream>

#include "base/allocator/winheap_stubs_win.h"
#include "base/check.h"

namespace {

using base::allocator::AllocatorDispatch;

void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  return base::allocator::WinHeapMalloc(size);
}

void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
                               size_t n,
                               size_t elem_size,
                               void* context) {
  // Overflow check.
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n)
    return nullptr;

  void* result = DefaultWinHeapMallocImpl(self, size, context);
  if (result) {
    memset(result, 0, size);
  }
  return result;
}

void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
                                 size_t alignment,
                                 size_t size,
                                 void* context) {
  CHECK(false) << "The windows heap does not support memalign.";
  return nullptr;
}

void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                void* context) {
  return base::allocator::WinHeapRealloc(address, size);
}

void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  base::allocator::WinHeapFree(address);
}

size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
                                         void* address,
                                         void* context) {
  return base::allocator::WinHeapGetSizeEstimate(address);
}

void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
                                      size_t size,
                                      size_t alignment,
                                      void* context) {
  return base::allocator::WinHeapAlignedMalloc(size, alignment);
}

void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
                                       void* ptr,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
}

void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
                                   void* ptr,
                                   void* context) {
  base::allocator::WinHeapAlignedFree(ptr);
}

}  // namespace

// Guarantee that default_dispatch is compile-time initialized to avoid using
// it before initialization (allocations before main in release builds with
// optimizations disabled).
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &DefaultWinHeapMallocImpl,
    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
    &DefaultWinHeapCallocImpl,
    &DefaultWinHeapMemalignImpl,
    &DefaultWinHeapReallocImpl,
    &DefaultWinHeapFreeImpl,
    &DefaultWinHeapGetSizeEstimateImpl,
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    &DefaultWinHeapAlignedMallocImpl,
    &DefaultWinHeapAlignedReallocImpl,
    &DefaultWinHeapAlignedFreeImpl,
|
||||||
|
nullptr, /* next */
|
||||||
|
};
|
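A minimal standalone sketch of the multiplication-overflow guard used in DefaultWinHeapCallocImpl above: the product n * elem_size wraps in size_t on overflow, so dividing it back by elem_size and comparing with n detects the wrap. The name CheckedMul is hypothetical, not part of the shim.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns true and writes n * elem_size to |out| only if the product fits in
// size_t; otherwise returns false.
bool CheckedMul(size_t n, size_t elem_size, size_t* out) {
  const size_t size = n * elem_size;  // Wraps around on overflow.
  if (elem_size != 0 && size / elem_size != n)
    return false;  // Wrapped: the true product does not fit in size_t.
  *out = size;
  return true;
}

int main() {
  size_t size = 0;
  std::printf("%d\n", CheckedMul(16, 16, &size));       // 1 (size == 256)
  std::printf("%d\n", CheckedMul(SIZE_MAX, 2, &size));  // 0 (overflow)
}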
53
src/base/allocator/allocator_shim_internals.h
Normal file
@ -0,0 +1,53 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_

#include "build/build_config.h"

#if defined(__GNUC__)

#if BUILDFLAG(IS_POSIX)
#include <sys/cdefs.h>  // for __THROW
#endif

#ifndef __THROW   // Not a glibc system
#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
#define __THROW _NOEXCEPT
#else
#define __THROW
#endif  // !_NOEXCEPT
#endif

// Shim layer symbols need to be ALWAYS exported, regardless of component build.
//
// If an exported symbol is linked into a DSO, it may be preempted by a
// definition in the main executable. If this happens to an allocator symbol, it
// will mean that the DSO will use the main executable's allocator. This is
// normally relatively harmless -- regular allocations should all use the same
// allocator, but if the DSO tries to hook the allocator it will not see any
// allocations.
//
// However, if LLVM LTO is enabled, the compiler may inline the shim layer
// symbols into callers. The end result is that allocator calls in DSOs may use
// either the main executable's allocator or the DSO's allocator, depending on
// whether the call was inlined. This is arguably a bug in LLVM caused by its
// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
// To work around the bug we use noinline to prevent the symbols from being
// inlined.
//
// In the long run we probably want to avoid linking the allocator bits into
// DSOs altogether. This will save a little space and stop giving DSOs the false
// impression that they can hook the allocator.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))

#elif BUILDFLAG(IS_WIN)  // __GNUC__

#define __THROW
#define SHIM_ALWAYS_EXPORT __declspec(noinline)

#endif  // __GNUC__

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
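A sketch of the attribute pair SHIM_ALWAYS_EXPORT expands to on a GCC/Clang ELF target; the macro and function names below are illustrative, not the shim's own. Default visibility keeps the symbol exported (and thus interposable), while noinline stops LTO from folding the body into callers, which is the inconsistency the comment above describes.

#define MY_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))

// Even under -flto, calls to this function keep going through the exported
// symbol instead of an inlined copy, so interposition stays coherent.
MY_ALWAYS_EXPORT int shim_probe() {
  return 42;
}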
166
src/base/allocator/allocator_shim_override_cpp_symbols.h
Normal file
@ -0,0 +1,166 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_

// Preempt the default new/delete C++ symbols so they call the shim entry
// points. This file is strongly inspired by tcmalloc's
// libc_override_redefine.h.

#include <new>

#include "base/allocator/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway to prevent a possible situation where a library gets loaded
// in that uses the aligned operators. We want to avoid a situation where
// separate heaps are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if BUILDFLAG(IS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif

#if !BUILDFLAG(IS_APPLE)
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
#else
// On Apple OSes, prefer not exporting these symbols (as this reverts to the
// default behavior, they are still exported in e.g. component builds). This is
// partly due to intentional limits on exported symbols in the main library, but
// it is also needless, since no library used on macOS imports these.
//
// TODO(lizeb): It may not be necessary anywhere to export these.
#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
#endif

SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {
  return ShimCppNew(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size) {
  return ShimCppNew(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size,
                                           const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new[](size_t size,
                                             const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
                                             const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
                                               const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p, size_t) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW(std::size_t size,
                                                      ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL(void* p,
                                                     ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR(
    std::size_t size,
    ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL_ARR(void* p,
                                                         ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
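A standalone illustration of the language mechanism the header above relies on: the global allocation operators are replaceable, so a definition in any translation unit preempts the toolchain default. This counting version is illustrative only and is not how the shim routes allocations.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

std::atomic<std::size_t> g_new_calls{0};

// Replaces the default global operator new for the whole program.
void* operator new(std::size_t size) {
  ++g_new_calls;
  if (size == 0)
    size = 1;  // operator new must return a unique non-null pointer.
  if (void* p = std::malloc(size))
    return p;
  throw std::bad_alloc();
}

void operator delete(void* p) noexcept {
  std::free(p);
}

int main() {
  delete new int(1);
  std::printf("operator new calls: %zu\n", g_new_calls.load());
}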
119
src/base/allocator/allocator_shim_override_glibc_weak_symbols.h
Normal file
@ -0,0 +1,119 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_

// Alias the internal Glibc symbols to the shim entry points.
// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
// Effectively this file does two things:
// 1) Re-define the __malloc_hook & co symbols. Those symbols are defined as
//    weak in glibc and are meant to be defined strongly by client processes
//    to hook calls initiated from within glibc.
// 2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
//    is that in the past (in RedHat 9) we had instances of libraries that were
//    allocating via malloc() and freeing using __libc_free().
//    See tcmalloc's libc_override_glibc.h for more context.

#include <features.h>  // for __GLIBC__
#include <malloc.h>
#include <unistd.h>

#include <new>

#include "base/allocator/allocator_shim_internals.h"

// __MALLOC_HOOK_VOLATILE is not defined in all Glibc headers.
#if !defined(__MALLOC_HOOK_VOLATILE)
#define MALLOC_HOOK_MAYBE_VOLATILE /**/
#else
#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
#endif

extern "C" {

// 1) Re-define malloc_hook weak symbols.
namespace {

void* GlibcMallocHook(size_t size, const void* caller) {
  return ShimMalloc(size, nullptr);
}

void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
  return ShimRealloc(ptr, size, nullptr);
}

void GlibcFreeHook(void* ptr, const void* caller) {
  return ShimFree(ptr, nullptr);
}

void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
  return ShimMemalign(align, size, nullptr);
}

}  // namespace

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
                                               const void*) = &GlibcMallocHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
    &GlibcReallocHook;

__attribute__((visibility("default"))) void (
    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
                                             const void*) = &GlibcFreeHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
    &GlibcMemalignHook;

// 2) Redefine libc symbols themselves.

SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
  return ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
  return ShimPosixMemalign(r, a, s);
}

}  // extern "C"

// Safety check.
#if !defined(__GLIBC__)
#error The target platform does not seem to use Glibc. Disable the allocator \
shim by setting use_allocator_shim=false in GN args.
#endif
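A runnable sketch of the weak-symbol preemption this header leans on, for GCC/Clang on ELF/glibc toolchains: a weak, undefined reference resolves to null unless some object file supplies a strong definition, which is exactly how a strong __malloc_hook definition in the executable wins over glibc's weak one. (Newer glibc releases have since removed the __malloc_hook family; this code targets the glibc versions contemporary with the import.)

#include <cstdio>

// Declared weak and left undefined on purpose.
extern "C" __attribute__((weak)) void optional_hook();

int main() {
  if (optional_hook)  // Non-null only if a strong definition was linked in.
    optional_hook();
  else
    std::printf("no strong definition linked in\n");
}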
88
src/base/allocator/allocator_shim_override_libc_symbols.h
Normal file
@ -0,0 +1,88 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header's purpose is to preempt the Libc symbols for malloc/new so they
// call the shim layer entry points.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_

#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif

#include "base/allocator/allocator_shim_internals.h"

extern "C" {

// WARNING: Whenever a new function is added here (which, surprisingly enough,
// happens. For instance glibc 2.33 introduced mallinfo2(), which we don't
// support... yet?), it MUST be added to build/linux/chrome.map.
//
// Otherwise the new symbol is not exported from Chromium's main binary, which
// is necessary to override libc's weak symbol, which in turn is necessary to
// intercept calls made by dynamic libraries. See crbug.com/1292206 for such
// an example.

SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
  return ShimPosixMemalign(r, a, s);
}

SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}

SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}

// The default dispatch translation unit also has to define the following
// symbols (unless they are ultimately routed to the system symbols):
//   void malloc_stats(void);
//   int mallopt(int, int);
//   struct mallinfo mallinfo(void);

}  // extern "C"
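A small demonstration of why malloc_usable_size() above must be routed through ShimGetSizeEstimate(): callers may legally use the whole usable size of a block, so the shim has to answer with what the backing allocator actually granted, not the requested size. Runnable on glibc systems; the printed usable size is allocator-dependent.

#include <cstdio>
#include <cstdlib>
#include <malloc.h>  // malloc_usable_size() on glibc.

int main() {
  void* p = std::malloc(13);
  // The allocator may round the request up to a size-class boundary.
  std::printf("requested 13, usable %zu\n", malloc_usable_size(p));
  std::free(p);
}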
147
src/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
Normal file
@ -0,0 +1,147 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_

// This header overrides the __wrap_X symbols when using the link-time
// -Wl,-wrap,malloc shim-layer approach (see README.md).
// All references to malloc, free, etc. within the linker unit that gets the
// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
// references to __wrap_malloc, __wrap_free, which are defined here.

#include <algorithm>
#include <cstring>

#include "base/allocator/allocator_shim_internals.h"

extern "C" {

SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
  return ShimMemalign(align, size, nullptr);
}

SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
                                             size_t align,
                                             size_t size) {
  return ShimPosixMemalign(res, align, size);
}

SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
  return ShimRealloc(address, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");

extern char* __wrap_strdup(const char* str);

// Override <stdlib.h>

extern char* __real_realpath(const char* path, char* resolved_path);

SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
                                         char* resolved_path) {
  if (resolved_path)
    return __real_realpath(path, resolved_path);

  char buffer[kPathMaxSize];
  if (!__real_realpath(path, buffer))
    return nullptr;
  return __wrap_strdup(buffer);
}

// Override <string.h> functions

SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
  std::size_t length = std::strlen(str) + 1;
  void* buffer = ShimMalloc(length, nullptr);
  if (!buffer)
    return nullptr;
  return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
}

SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
  std::size_t length = std::min(std::strlen(str), n);
  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
  if (!buffer)
    return nullptr;
  std::memcpy(buffer, str, length);
  buffer[length] = '\0';
  return buffer;
}

// Override <unistd.h>

extern char* __real_getcwd(char* buffer, size_t size);

SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
  if (buffer)
    return __real_getcwd(buffer, size);

  if (!size)
    size = kPathMaxSize;
  char local_buffer[size];
  if (!__real_getcwd(local_buffer, size))
    return nullptr;
  return __wrap_strdup(local_buffer);
}

// Override stdio.h

// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
// Android, and used by libc++.
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
                                        const char* fmt,
                                        va_list va_args) {
  constexpr int kInitialSize = 128;
  *strp = static_cast<char*>(
      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.

  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
  *strp = static_cast<char*>(realloc(*strp, actual_size + 1));

  // Now we know the size. This is not very efficient, but we cannot really do
  // better without accessing internal libc functions, or reimplementing
  // *printf().
  //
  // This is very lightly used in Chromium in practice, see crbug.com/116558 for
  // details.
  if (actual_size >= kInitialSize)
    return vsnprintf(*strp, actual_size + 1, fmt, va_args);

  return actual_size;
}

SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
  va_list va_args;
  va_start(va_args, fmt);
  int retval = vasprintf(strp, fmt, va_args);
  va_end(va_args);
  return retval;
}

}  // extern "C"
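A self-contained sketch of the GNU ld --wrap mechanism the header above depends on. Built with, e.g., g++ demo.cc -Wl,--wrap=malloc, every undefined reference to malloc in the link is redirected to __wrap_malloc, and __real_malloc resolves to the original libc symbol. This logging wrapper is illustrative only.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Provided by the linker as an alias for the original malloc.
extern "C" void* __real_malloc(std::size_t size);

// All wrapped malloc calls in the link land here instead.
extern "C" void* __wrap_malloc(std::size_t size) {
  std::fprintf(stderr, "malloc(%zu)\n", size);
  return __real_malloc(size);
}

int main() {
  std::free(std::malloc(32));  // Logs "malloc(32)" through the wrapper.
}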
377
src/base/allocator/allocator_shim_override_mac_default_zone.h
Normal file
@ -0,0 +1,377 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
#endif

#include <string.h>

#include <tuple>

#include "base/allocator/early_zone_registration_mac.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/logging.h"

namespace partition_alloc {

// Defined in base/allocator/partition_allocator/partition_root.cc
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

}  // namespace partition_alloc

namespace base::allocator {

namespace {

// malloc_introspection_t's callback functions for our own zone

kern_return_t MallocIntrospectionEnumerator(task_t task,
                                            void*,
                                            unsigned type_mask,
                                            vm_address_t zone_address,
                                            memory_reader_t reader,
                                            vm_range_recorder_t recorder) {
  // Should enumerate all memory regions allocated by this allocator, but this
  // is not implemented because there is no use case for now.
  return KERN_FAILURE;
}

size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
  return base::bits::AlignUp(size, partition_alloc::internal::kAlignment);
}

boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
  // Should check the consistency of the allocator implementing this malloc
  // zone, but this is not implemented because there is no use case for now.
  return true;
}

void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
  // Should print the current state of the zone for debugging / investigation
  // purposes, but this is not implemented because there is no use case for
  // now.
}

void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
  // Should enable logging of the activities on the given `address`, but this
  // is not implemented because there is no use case for now.
}

void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
}

void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
}

void MallocIntrospectionStatistics(malloc_zone_t* zone,
                                   malloc_statistics_t* stats) {
  // Should report the memory usage correctly, but this is not implemented
  // because there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
  // Should return true if the underlying PartitionRoot is locked, but this is
  // not implemented because this function does not seem to be used
  // effectively.
  return false;
}

boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
  return false;
}

void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
}

void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
  // 'discharge' is not supported.
}

void MallocIntrospectionEnumerateDischargedPointers(
    malloc_zone_t* zone,
    void (^report_discharged)(void* memory, void* info)) {
  // 'discharge' is not supported.
}

void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in a child process after fork(2) to re-initialize the lock.
  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
}

void MallocIntrospectionPrintTask(task_t task,
                                  unsigned level,
                                  vm_address_t zone_address,
                                  memory_reader_t reader,
                                  print_task_printer_t printer) {
  // Should print the current state of another process's zone for debugging /
  // investigation purposes, but this is not implemented because there is no
  // use case for now.
}

void MallocIntrospectionTaskStatistics(task_t task,
                                       vm_address_t zone_address,
                                       memory_reader_t reader,
                                       malloc_statistics_t* stats) {
  // Should report the memory usage in another process's zone, but this is not
  // implemented because there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

// malloc_zone_t's callback functions for our own zone

size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
  return ShimGetSizeEstimate(ptr, nullptr);
}

void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
  return ShimMalloc(size, nullptr);
}

void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
  return ShimValloc(size, nullptr);
}

void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
  return ShimFree(ptr, nullptr);
}

void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

void MallocZoneDestroy(malloc_zone_t* zone) {
  // No support to destroy the zone for now.
}

void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  return ShimMemalign(alignment, size, nullptr);
}

void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimFreeDefiniteSize(ptr, size, nullptr);
}

unsigned MallocZoneBatchMalloc(malloc_zone_t* zone,
                               size_t size,
                               void** results,
                               unsigned num_requested) {
  return ShimBatchMalloc(size, results, num_requested, nullptr);
}

void MallocZoneBatchFree(malloc_zone_t* zone,
                         void** to_be_freed,
                         unsigned num) {
  return ShimBatchFree(to_be_freed, num, nullptr);
}

malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};

malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the initial
  // one. The default one is the first element of the default zone array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

bool IsAlreadyRegistered() {
  // HACK: This should really only be called once, but it is not.
  //
  // This function is a static constructor of its binary. If it is included in
  // a dynamic library, then the same process may end up executing this code
  // multiple times, once per library. As a consequence, each new library will
  // add its own allocator as the default zone. Aside from splitting the heap
  // further, the main issue arises if/when the last library to be loaded
  // (dlopen()-ed) gets dlclose()-ed.
  //
  // See crbug.com/1271139 for details.
  //
  // In this case, subsequent free() calls will be routed by libmalloc to the
  // deleted zone (since its code has been unloaded from memory), and will
  // crash inside libsystem's free(). This in practice happens as soon as
  // dlclose() is called, inside the dynamic linker (dyld).
  //
  // Since we are talking about different libraries, and issues inside the
  // dynamic linker, we cannot use a global static variable (which would be
  // per-library), or anything from pthread.
  //
  // The solution used here is to check whether the current default zone is
  // already ours, in which case we are not the first dynamic library here, and
  // should do nothing. This is racy, and hacky.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // *Not* using malloc_default_zone(), as it seems to be hardcoded to return
  // something other than the default zone. See the difference between
  // malloc_default_zone() and inline_malloc_default_zone() in Apple's malloc.c
  // (in libmalloc).
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  // Checking all the zones, in case someone registered their own zone on top
  // of our own.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);

    // strcmp() and not a pointer comparison, as the zone was registered from
    // another library, so the pointers don't match.
    if (zone->zone_name &&
        (strcmp(zone->zone_name, partition_alloc::kPartitionAllocZoneName) ==
         0)) {
      // This zone is provided by PartitionAlloc, so this function has been
      // called from another library (or the main executable), nothing to do.
      //
      // This should be a crash, ideally, but callers do it, so only warn, for
      // now.
      RAW_LOG(ERROR,
              "Trying to load the allocator multiple times. This is *not* "
              "supported.");
      return true;
    }
  }

  return false;
}

void InitializeZone() {
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // The `version` member indicates which APIs are supported in this zone.
  // version >= 5: memalign is supported
  // version >= 6: free_definite_size is supported
  // version >= 7: introspect's discharge family is supported
  // version >= 8: pressure_relief is supported
  // version >= 9: introspect.reinit_lock is supported
  // version >= 10: claimed_address is supported
  // version >= 11: introspect.print_task is supported
  // version >= 12: introspect.task_statistics is supported
  g_mac_malloc_zone.version = partition_alloc::kZoneVersion;
  g_mac_malloc_zone.zone_name = partition_alloc::kPartitionAllocZoneName;
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = MallocZoneBatchMalloc;
  g_mac_malloc_zone.batch_free = MallocZoneBatchFree;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = nullptr;
}

// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority of the initialization order,
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
// unfortunately and allocates memory with the system allocator. Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher. Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  if (IsAlreadyRegistered())
    return;

  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone since PartitionAlloc
  // doesn't support a purgeable zone.
  std::ignore = malloc_default_zone();
  std::ignore = malloc_default_purgeable_zone();

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  InitializeZone();

  malloc_zone_t* system_default_zone = GetDefaultMallocZone();
  if (strcmp(system_default_zone->zone_name,
             partition_alloc::kDelegatingZoneName) == 0) {
    // The first zone is our zone, we can unregister it, replacing it with the
    // new one. This relies on a precise zone setup, done in
    // |EarlyMallocZoneRegistration()|.
    malloc_zone_register(&g_mac_malloc_zone);
    malloc_zone_unregister(system_default_zone);
    return;
  }

  // Not in the path where the zone was registered early. This is either racy,
  // or fine if the current process is not hosting multiple threads.
  //
  // This path is fine for e.g. most unit tests.
  //
  // Make our own zone the default zone.
  //
  // Put our own zone at the last position, so that it promotes to the default
  // zone. The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  // Between malloc_zone_unregister(system_default_zone) (above) and
  // malloc_zone_register(system_default_zone) (below), i.e. while
  // system_default_zone is absent, it's possible that another thread calls
  // free(ptr), hits a "no zone found" error, and crashes the process.
  malloc_zone_register(system_default_zone);

  // Confirm that our own zone is now the default zone.
  CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
}

}  // namespace

}  // namespace base::allocator
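A macOS-only sketch that enumerates the registered malloc zones the same way GetDefaultMallocZone() above does, printing each zone's name; on a stock process the first entry is libsystem_malloc's default zone. It uses only the libmalloc/Mach calls already used in the file.

#include <cstdio>
#include <mach/mach.h>
#include <malloc/malloc.h>

int main() {
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  // A null memory_reader_t is acceptable when inspecting our own task.
  if (malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count) !=
      KERN_SUCCESS)
    return 1;
  for (unsigned int i = 0; i < zone_count; i++) {
    auto* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    std::printf("zone %u: %s\n", i, zone->zone_name ? zone->zone_name : "?");
  }
}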
60
src/base/allocator/allocator_shim_override_mac_symbols.h
Normal file
@ -0,0 +1,60 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_

#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
  MallocZoneFunctions new_functions;
  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return ShimGetSizeEstimate(ptr, zone);
  };
  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimMalloc(size, zone);
  };
  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
                            size_t size) -> void* {
    return ShimCalloc(n, size, zone);
  };
  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimValloc(size, zone);
  };
  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
    ShimFree(ptr, zone);
  };
  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
                             size_t size) -> void* {
    return ShimRealloc(ptr, size, zone);
  };
  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
                                  void** results,
                                  unsigned num_requested) -> unsigned {
    return ShimBatchMalloc(size, results, num_requested, zone);
  };
  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
                                unsigned num_to_be_freed) -> void {
    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
  };
  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
                              size_t size) -> void* {
    return ShimMemalign(alignment, size, zone);
  };
  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                        size_t size) {
    ShimFreeDefiniteSize(ptr, size, zone);
  };
  return new_functions;
}

}  // namespace allocator
}  // namespace base
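The table above is filled with captureless lambdas rather than named functions; this works because a lambda with no captures converts implicitly to the matching C function pointer. A minimal illustration:

#include <cstdio>

int main() {
  // Captureless lambda decays to a plain function pointer.
  int (*add)(int, int) = [](int a, int b) { return a + b; };
  std::printf("%d\n", add(2, 3));  // Prints 5.
}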
178
src/base/allocator/allocator_shim_override_ucrt_symbols_win.h
Normal file
@ -0,0 +1,178 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header defines symbols to override the same functions in the Visual C++
// CRT implementation.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_

#include <malloc.h>

#include <windows.h>

#include "base/allocator/allocator_shim_internals.h"

// Even though most C++ allocation operators can be left alone since the
// interception works at a lower level, these ones should be
// overridden. Otherwise they redirect to malloc(), which is configured to crash
// with an OOM in failure cases, such as allocation requests that are too large.
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) noexcept {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) noexcept {
  return ShimCppNewNoThrow(size);
}

extern "C" {

void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;

namespace {

int win_new_mode = 0;

}  // namespace

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
//
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
int _set_new_mode(int flag) {
  // The MS CRT calls this function early on in startup, so this serves as a
  // low-overhead proof that the allocator shim is in place for this process.
  base::allocator::g_is_win_shim_layer_initialized = true;
  int old_mode = win_new_mode;
  win_new_mode = flag;

  base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);

  return old_mode;
}

// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
int _query_new_mode() {
  return win_new_mode;
}

// These symbols override the CRT's implementation of the same functions.
__declspec(restrict) void* malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

void free(void* ptr) {
  ShimFree(ptr, nullptr);
}

__declspec(restrict) void* realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

__declspec(restrict) void* calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

// _msize() is the Windows equivalent of malloc_size().
size_t _msize(void* memblock) {
  return ShimGetSizeEstimate(memblock, nullptr);
}

__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
  return ShimAlignedMalloc(size, alignment, nullptr);
}

__declspec(restrict) void* _aligned_realloc(void* address,
                                            size_t size,
                                            size_t alignment) {
  return ShimAlignedRealloc(address, size, alignment, nullptr);
}

void _aligned_free(void* address) {
  ShimAlignedFree(address, nullptr);
}

// _recalloc_base is called by CRT internally.
__declspec(restrict) void* _recalloc_base(void* block,
                                          size_t count,
                                          size_t size) {
  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
  base::CheckedNumeric<size_t> new_block_size_checked = count;
  new_block_size_checked *= size;
  const size_t new_block_size = new_block_size_checked.ValueOrDie();

  void* const new_block = realloc(block, new_block_size);

  if (new_block != nullptr && old_block_size < new_block_size) {
    memset(static_cast<char*>(new_block) + old_block_size, 0,
           new_block_size - old_block_size);
  }

  return new_block;
}

__declspec(restrict) void* _malloc_base(size_t size) {
  return malloc(size);
}

__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
  return calloc(n, size);
}

void _free_base(void* block) {
  free(block);
}

__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
  return _recalloc_base(block, count, size);
}

// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added if
// uses are introduced.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

}  // extern "C"

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
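A portable sketch of the _recalloc_base() contract above: grow (or shrink) a block and zero only the newly exposed tail. Here the caller passes the old size explicitly, standing in for _msize(), and a plain division check replaces base::CheckedNumeric; the function name is hypothetical.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

void* RecallocSketch(void* block, size_t old_size, size_t count, size_t size) {
  if (size != 0 && count > SIZE_MAX / size)
    return nullptr;  // count * size would overflow size_t.
  const size_t new_size = count * size;
  void* grown = std::realloc(block, new_size);
  if (grown && old_size < new_size) {
    // realloc preserved the old bytes; zero only the new tail.
    std::memset(static_cast<char*>(grown) + old_size, 0, new_size - old_size);
  }
  return grown;
}

int main() {
  void* p = RecallocSketch(nullptr, 0, 4, 8);  // 32 zeroed bytes.
  std::printf("%p\n", p);
  std::free(p);
}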
256
src/base/allocator/early_zone_registration_mac.cc
Normal file
@ -0,0 +1,256 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/early_zone_registration_mac.h"

#include <mach/mach.h>
#include <malloc/malloc.h>

#include "base/allocator/buildflags.h"

// BASE_EXPORT tends to be defined as soon as anything from //base is included.
#if defined(BASE_EXPORT)
#error "This file cannot depend on //base"
#endif

namespace partition_alloc {

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void EarlyMallocZoneRegistration() {}
void AllowDoublePartitionAllocZoneRegistration() {}

#else

extern "C" {
// abort_report_np() records the message in a special section that both the
// system CrashReporter and Crashpad collect in crash reports. See also in
// chrome_exe_main_mac.cc.
void abort_report_np(const char* fmt, ...);
}

namespace {
malloc_zone_t* GetDefaultMallocZone() {
  // malloc_default_zone() does not return... the default zone, but the
  // initial one. The default one is the first element of the default zone
  // array.
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS)
    abort_report_np("Cannot enumerate malloc() zones");
  return reinterpret_cast<malloc_zone_t*>(zones[0]);
}

}  // namespace

void EarlyMallocZoneRegistration() {
  // Must have static storage duration, as raw pointers are passed to
  // libsystem_malloc.
  static malloc_zone_t g_delegating_zone;
  static malloc_introspection_t g_delegating_zone_introspect;
  static malloc_zone_t* g_default_zone;

  // Make sure that the default zone is instantiated.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  g_default_zone = GetDefaultMallocZone();

  // The delegating zone:
  // - Forwards all allocations to the existing default zone
  // - Does *not* claim to own any memory, meaning that it will always be
  //   skipped in free() in libsystem_malloc.dylib.
  //
  // This is a temporary zone, until it gets replaced by PartitionAlloc, inside
  // the main library. Since the main library depends on many external
  // libraries, we cannot install PartitionAlloc as the default zone without
  // concurrency issues.
  //
  // Instead, what we do here, while the process is single-threaded, is:
  // - Register the delegating zone as the default one.
  // - Set the original (libsystem_malloc's) one as the second zone.
  //
  // Later, when PartitionAlloc initializes, we replace the default (delegating)
  // zone with ours. The end state is:
  // 1. PartitionAlloc zone
  // 2. libsystem_malloc zone

  // Set up of the delegating zone. Note that it doesn't just forward calls to
  // the default zone. This is because the system zone's malloc_zone_t pointer
  // actually points to a larger struct, containing allocator metadata. So if we
  // pass as the first parameter the "simple" delegating zone pointer, then we
  // immediately crash inside the system zone functions. So we need to replace
  // the zone pointer as well.
  //
  // Calls fall into 4 categories:
  // - Allocation calls: forwarded to the real system zone
  // - "Is this pointer yours" calls: always answer no
  // - free(): Should never be called, but is in practice, see comments below.
  // - Diagnostics and debugging: these are typically called for every
  //   zone. They are no-ops for us, as we don't want to double-count, or lock
  //   the data structures of the real zone twice.

  // Allocation: Forward to the real zone.
  g_delegating_zone.malloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->malloc(g_default_zone, size);
  };
  g_delegating_zone.calloc = [](malloc_zone_t* zone, size_t num_items,
                                size_t size) {
    return g_default_zone->calloc(g_default_zone, num_items, size);
  };
  g_delegating_zone.valloc = [](malloc_zone_t* zone, size_t size) {
    return g_default_zone->valloc(g_default_zone, size);
  };
  g_delegating_zone.realloc = [](malloc_zone_t* zone, void* ptr, size_t size) {
    return g_default_zone->realloc(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_malloc = [](malloc_zone_t* zone, size_t size,
                                      void** results, unsigned num_requested) {
    return g_default_zone->batch_malloc(g_default_zone, size, results,
                                        num_requested);
  };
  g_delegating_zone.memalign = [](malloc_zone_t* zone, size_t alignment,
                                  size_t size) {
    return g_default_zone->memalign(g_default_zone, alignment, size);
  };

  // Does ptr belong to this zone? Return value is != 0 if so.
  g_delegating_zone.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return 0;
  };

  // Free functions.
  // The normal path for freeing memory is:
  // 1. Try all zones in order, call zone->size(ptr)
  // 2. If zone->size(ptr) != 0, call zone->free(ptr) (or free_definite_size)
  // 3. If no zone matches, crash.
  //
  // Since this zone always returns 0 in size() (see above), then zone->free()
  // should never be called. Unfortunately, this is not the case, as some places
  // in CoreFoundation call malloc_zone_free(zone, ptr) directly. So rather than
  // crashing, forward the call. It's the caller's responsibility to use the
  // same zone for free() as for the allocation (this is in the contract of
  // malloc_zone_free()).
  //
  // However, note that the sequence of calls size() -> free() is not possible
  // for this zone, as size() always returns 0.
  g_delegating_zone.free = [](malloc_zone_t* zone, void* ptr) {
    return g_default_zone->free(g_default_zone, ptr);
  };
  g_delegating_zone.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                            size_t size) {
    return g_default_zone->free_definite_size(g_default_zone, ptr, size);
  };
  g_delegating_zone.batch_free = [](malloc_zone_t* zone, void** to_be_freed,
                                    unsigned num_to_be_freed) {
    return g_default_zone->batch_free(g_default_zone, to_be_freed,
                                      num_to_be_freed);
  };

  // Diagnostics and debugging.
  //
  // Do nothing to reduce memory footprint, the real
  // zone will do it.
  g_delegating_zone.pressure_relief = [](malloc_zone_t* zone,
                                         size_t goal) -> size_t { return 0; };

  // Introspection calls are not all optional, for instance locking and
  // unlocking before/after fork() is not optional.
  //
  // Nothing to enumerate.
  g_delegating_zone_introspect.enumerator =
      [](task_t task, void*, unsigned type_mask, vm_address_t zone_address,
         memory_reader_t reader,
         vm_range_recorder_t recorder) -> kern_return_t {
    return KERN_SUCCESS;
  };
  // Need to provide a real implementation, it is used for e.g. array sizing.
  g_delegating_zone_introspect.good_size = [](malloc_zone_t* zone,
                                              size_t size) {
    return g_default_zone->introspect->good_size(g_default_zone, size);
  };
  // Nothing to do.
  g_delegating_zone_introspect.check = [](malloc_zone_t* zone) -> boolean_t {
    return true;
  };
  g_delegating_zone_introspect.print = [](malloc_zone_t* zone,
                                          boolean_t verbose) {};
  g_delegating_zone_introspect.log = [](malloc_zone_t*, void*) {};
  // Do not forward the lock / unlock calls. Since the default zone is still
  // there, we should not lock here, as it would lock the zone twice (all
  // zones are locked before fork().). Rather, do nothing, since this fake
  // zone does not need any locking.
  g_delegating_zone_introspect.force_lock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.force_unlock = [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.reinit_lock = [](malloc_zone_t* zone) {};
  // No stats.
  g_delegating_zone_introspect.statistics = [](malloc_zone_t* zone,
                                               malloc_statistics_t* stats) {};
  // We are not locked.
  g_delegating_zone_introspect.zone_locked =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  // Don't support discharge checking.
  g_delegating_zone_introspect.enable_discharge_checking =
      [](malloc_zone_t* zone) -> boolean_t { return false; };
  g_delegating_zone_introspect.disable_discharge_checking =
      [](malloc_zone_t* zone) {};
  g_delegating_zone_introspect.discharge = [](malloc_zone_t* zone,
                                              void* memory) {};

  // Could use something lower to support fewer functions, but this is
  // consistent with the real zone installed by PartitionAlloc.
  g_delegating_zone.version = kZoneVersion;
  g_delegating_zone.introspect = &g_delegating_zone_introspect;
  // This name is used in PartitionAlloc's initialization to determine whether
  // it should replace the delegating zone.
  g_delegating_zone.zone_name = kDelegatingZoneName;

  // Register puts the new zone at the end, unregister swaps the new zone with
  // the last one.
  // The zone array is, after these lines, in order:
  // 1. |g_default_zone|...|g_delegating_zone|
  // 2. |g_delegating_zone|...| (no more default)
  // 3. |g_delegating_zone|...|g_default_zone|
  malloc_zone_register(&g_delegating_zone);
  malloc_zone_unregister(g_default_zone);
  malloc_zone_register(g_default_zone);

  // Make sure that the purgeable zone is after the default one.
  // Will make g_default_zone take the purgeable zone spot.
  malloc_zone_unregister(purgeable_zone);
  // Add back the purgeable zone as the last one.
  malloc_zone_register(purgeable_zone);

  // Final configuration:
  // |g_delegating_zone|...|g_default_zone|purgeable_zone|

  // Sanity check.
  if (GetDefaultMallocZone() != &g_delegating_zone)
    abort_report_np("Failed to install the delegating zone as default.");
}

void AllowDoublePartitionAllocZoneRegistration() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  if (result != KERN_SUCCESS)
    abort_report_np("Cannot enumerate malloc() zones");

  // If PartitionAlloc is one of the zones, *change* its name so that
  // registration can happen multiple times. This works because zone
  // registration only keeps a pointer to the struct; it does not copy the
  // data.
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    if (zone->zone_name &&
        strcmp(zone->zone_name, kPartitionAllocZoneName) == 0) {
      zone->zone_name = "RenamedPartitionAlloc";
      break;
    }
  }
}

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}  // namespace partition_alloc
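Illustrative sketch, not part of the imported file: after
EarlyMallocZoneRegistration() returns, the resulting zone order can be
inspected with the same malloc_get_all_zones() API the file itself uses.
Expected order: the delegating zone, then libsystem_malloc's zone, then the
purgeable zone. The helper name is invented:

#include <mach/mach.h>
#include <malloc/malloc.h>
#include <cstdio>

void DumpMallocZoneOrder() {
  unsigned int zone_count = 0;
  vm_address_t* zones = nullptr;
  if (malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count) !=
      KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < zone_count; i++) {
    malloc_zone_t* zone = reinterpret_cast<malloc_zone_t*>(zones[i]);
    // Prints e.g. "DelegatingDefaultZoneForPartitionAlloc" as zone 0.
    printf("zone %u: %s\n", i, zone->zone_name ? zone->zone_name : "(unnamed)");
  }
}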
37
src/base/allocator/early_zone_registration_mac.h
Normal file
@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
#define BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_

// This is an Apple-only file, used to register PartitionAlloc's zone *before*
// the process becomes multi-threaded.

namespace partition_alloc {

static constexpr char kDelegatingZoneName[] =
    "DelegatingDefaultZoneForPartitionAlloc";
static constexpr char kPartitionAllocZoneName[] = "PartitionAlloc";

// Zone version. Determines which callbacks are set in the various
// malloc_zone_t structs.
constexpr int kZoneVersion = 9;

// Must be called *once*, *before* the process becomes multi-threaded.
void EarlyMallocZoneRegistration();

// Tricks the registration code to believe that PartitionAlloc was not already
// registered. This allows a future library load to register PartitionAlloc's
// zone as well, rather than bailing out.
//
// This is mutually exclusive with EarlyMallocZoneRegistration(), and should
// ideally be removed. Indeed, by allowing two zones to be registered, we still
// end up with a split heap, and more memory usage.
//
// This is a hack for crbug.com/1274236.
void AllowDoublePartitionAllocZoneRegistration();

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_EARLY_ZONE_REGISTRATION_MAC_H_
119
src/base/allocator/malloc_zone_functions_mac.cc
Normal file
@ -0,0 +1,119 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/malloc_zone_functions_mac.h"

#include <atomic>

#include "base/synchronization/lock.h"

namespace base {
namespace allocator {

MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
              "MallocZoneFunctions must be POD");

void StoreZoneFunctions(const ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  memset(functions, 0, sizeof(MallocZoneFunctions));
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.
    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }

  // Note that zone version 8 introduced a pressure relief callback, and version
  // 10 introduced a claimed address callback, but neither are allocation or
  // deallocation callbacks and so aren't important to intercept.

  functions->context = zone;
}

namespace {

// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
base::Lock& GetLock() {
  static base::Lock* g_lock = new base::Lock;
  return *g_lock;
}

void EnsureMallocZonesInitializedLocked() {
  GetLock().AssertAcquired();
}

int g_zone_count = 0;

bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
  EnsureMallocZonesInitializedLocked();
  GetLock().AssertAcquired();
  for (int i = 0; i < g_zone_count; ++i) {
    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
      return true;
  }
  return false;
}

}  // namespace

bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;

  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code that
  // reads these values is triggered after this function returns, so we want to
  // guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}

bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}

bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}

int GetMallocZoneCountForTesting() {
  base::AutoLock l(GetLock());
  return g_zone_count;
}

void ClearAllMallocZonesForTesting() {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}

}  // namespace allocator
}  // namespace base
103
src/base/allocator/malloc_zone_functions_mac.h
Normal file
@ -0,0 +1,103 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_

#include <malloc/malloc.h>
#include <stddef.h>

#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);
typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
                                      size_t size,
                                      void** results,
                                      unsigned num_requested);
typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
                                void** to_be_freed,
                                unsigned num_to_be_freed);
typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
                                        void* ptr,
                                        size_t size);
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);

struct MallocZoneFunctions {
  malloc_type malloc;
  calloc_type calloc;
  valloc_type valloc;
  free_type free;
  realloc_type realloc;
  memalign_type memalign;
  batch_malloc_type batch_malloc;
  batch_free_type batch_free;
  free_definite_size_type free_definite_size;
  size_fn_type size;
  const ChromeMallocZone* context;
};

BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
                                    MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];

// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch back
// into the zone, and additional zones may be added later in the execution of
// the program, so the array needs to be both thread-safe and high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
// 1. Fill in all the fields.
// 2. Update the total zone count.
// 3. Insert a memory barrier.
// 4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
    ChromeMallocZone* zone,
    const MallocZoneFunctions* functions);

BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();

inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
    if (g_malloc_zones[i].context == zone)
      return g_malloc_zones[i];
  }
  IMMEDIATE_CRASH();
}

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
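Illustrative sketch, not part of the imported file: a shimmed zone entry point
dispatches back to the original implementation through GetFunctionsForZone(),
keyed on the zone pointer the system passes in. MallocShimMalloc is an
invented name; the lookup is lock-free because entries in g_malloc_zones are
published behind a memory barrier before any shim is installed:

#include "base/allocator/malloc_zone_functions_mac.h"

void* MallocShimMalloc(struct _malloc_zone_t* zone, size_t size) {
  // Find the original zone's function table by its |context| pointer.
  base::allocator::MallocZoneFunctions& functions =
      base::allocator::GetFunctionsForZone(zone);
  // ... instrumentation or accounting would go here ...
  return functions.malloc(zone, size);
}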
138
src/base/allocator/partition_alloc_features.cc
Normal file
@ -0,0 +1,138 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_alloc_features.h"

#include "base/base_export.h"
#include "base/feature_list.h"
#include "build/build_config.h"

namespace base {
namespace features {

// When set, instead of crashing when encountering a dangling raw_ptr, the
// signatures of the two stacktraces are logged. This is meant to be used only
// by Chromium developers. See /docs/dangling_ptr.md
const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord{
    "PartitionAllocDanglingPtrRecord", FEATURE_DISABLED_BY_DEFAULT};

#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};
#endif  // defined(PA_ALLOW_PCSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
    "PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, PCScan is turned on only for the renderer's malloc partition.
const Feature kPartitionAllocPCScanRendererOnly{
    "PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
const Feature kPartitionAllocBackupRefPtrControl{
    "PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};

// Use a larger maximum thread cache cacheable bucket size.
const Feature kPartitionAllocLargeThreadCacheSize{
    "PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
    // Not unconditionally enabled on 32 bit Android, since it is a more
    // memory-constrained platform.
    FEATURE_DISABLED_BY_DEFAULT
#else
    FEATURE_ENABLED_BY_DEFAULT
#endif
};

const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
    "PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

const Feature kPartitionAllocBackupRefPtr {
  "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN)
      FEATURE_ENABLED_BY_DEFAULT
#else
      FEATURE_DISABLED_BY_DEFAULT
#endif
};

constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
    kBackupRefPtrEnabledProcessesOptions[] = {
        {BackupRefPtrEnabledProcesses::kBrowserOnly, "browser-only"},
        {BackupRefPtrEnabledProcesses::kBrowserAndRenderer,
         "browser-and-renderer"},
        {BackupRefPtrEnabledProcesses::kNonRenderer, "non-renderer"},
        {BackupRefPtrEnabledProcesses::kAllProcesses, "all-processes"}};

const base::FeatureParam<BackupRefPtrEnabledProcesses>
    kBackupRefPtrEnabledProcessesParam{
        &kPartitionAllocBackupRefPtr, "enabled-processes",
        BackupRefPtrEnabledProcesses::kBrowserOnly,
        &kBackupRefPtrEnabledProcessesOptions};

constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
    {BackupRefPtrMode::kDisabled, "disabled"},
    {BackupRefPtrMode::kEnabled, "enabled"},
    {BackupRefPtrMode::kDisabledButSplitPartitions2Way,
     "disabled-but-2-way-split"},
    {BackupRefPtrMode::kDisabledButSplitPartitions3Way,
     "disabled-but-3-way-split"},
};

const base::FeatureParam<BackupRefPtrMode> kBackupRefPtrModeParam{
    &kPartitionAllocBackupRefPtr, "brp-mode", BackupRefPtrMode::kEnabled,
    &kBackupRefPtrModeOptions};

const base::FeatureParam<bool> kBackupRefPtrAsanEnableDereferenceCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-dereference-check", true};
const base::FeatureParam<bool> kBackupRefPtrAsanEnableExtractionCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-extraction-check",
    false};  // Not much noise at the moment to enable by default.
const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
    &kPartitionAllocBackupRefPtr, "asan-enable-instantiation-check", true};

// If enabled, switches the bucket distribution to an alternate one. The
// alternate distribution must have buckets that are a subset of the default
// one.
const Feature kPartitionAllocUseAlternateDistribution{
    "PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
const Feature kPartitionAllocPCScanMUAwareScheduler{
    "PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};

// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
const Feature kPartitionAllocPCScanImmediateFreeing{
    "PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, PCScan clears eagerly (synchronously) on free().
const Feature kPartitionAllocPCScanEagerClearing{
    "PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};

// In addition to heap, scan also the stack of the current mutator.
const Feature kPartitionAllocPCScanStackScanning {
  "PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
      FEATURE_ENABLED_BY_DEFAULT
#else
      FEATURE_DISABLED_BY_DEFAULT
#endif  // defined(PA_PCSCAN_STACK_SUPPORTED)
};

const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};

// Whether to sort the active slot spans in PurgeMemory().
extern const Feature kPartitionAllocSortActiveSlotSpans{
    "PartitionAllocSortActiveSlotSpans", FEATURE_DISABLED_BY_DEFAULT};

}  // namespace features
}  // namespace base
85
src/base/allocator/partition_alloc_features.h
Normal file
@ -0,0 +1,85 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/metrics/field_trial_params.h"

namespace base {

struct Feature;

namespace features {

extern const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord;
#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif  // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocPCScanRendererOnly;
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
extern const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

enum class BackupRefPtrEnabledProcesses {
  // BRP enabled only in the browser process.
  kBrowserOnly,
  // BRP enabled only in the browser and renderer processes.
  kBrowserAndRenderer,
  // BRP enabled in all processes, except renderer.
  kNonRenderer,
  // BRP enabled in all processes.
  kAllProcesses,
};

enum class BackupRefPtrMode {
  // BRP is disabled across all partitions. Equivalent to the Finch flag being
  // disabled.
  kDisabled,

  // BRP is enabled in the main partition, as well as certain Renderer-only
  // partitions (if enabled in Renderer at all).
  // This entails splitting the main partition.
  kEnabled,

  // BRP is disabled, but the main partition is split out, as if BRP was enabled
  // in the "previous slot" mode.
  kDisabledButSplitPartitions2Way,

  // BRP is disabled, but the main partition *and* aligned partition are split
  // out, as if BRP was enabled in the "before allocation" mode.
  kDisabledButSplitPartitions3Way,
};

extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtr;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
    kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
    kBackupRefPtrModeParam;
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableDereferenceCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableExtractionCheckParam;
extern const BASE_EXPORT base::FeatureParam<bool>
    kBackupRefPtrAsanEnableInstantiationCheckParam;

extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
extern const BASE_EXPORT Feature kPartitionAllocDCScan;
extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;
extern const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution;
extern const BASE_EXPORT Feature kPartitionAllocSortActiveSlotSpans;

}  // namespace features
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
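Illustrative sketch, not part of the imported file: callers combine the
feature flag with its params, mirroring the brp_truly_enabled logic in
partition_alloc_support.cc below. ShouldEnableBrpProtection is an invented
name:

#include "base/allocator/partition_alloc_features.h"
#include "base/feature_list.h"

bool ShouldEnableBrpProtection() {
  // BRP protection requires both the Finch flag and brp-mode=enabled; the
  // other modes only split partitions without enabling the protection.
  return base::FeatureList::IsEnabled(
             base::features::kPartitionAllocBackupRefPtr) &&
         base::features::kBackupRefPtrModeParam.Get() ==
             base::features::BackupRefPtrMode::kEnabled;
}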
522
src/base/allocator/partition_alloc_support.cc
Normal file
@ -0,0 +1,522 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_alloc_support.h"

#include <array>
#include <cstdint>
#include <map>
#include <string>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
#include "base/debug/stack_trace.h"
#include "base/feature_list.h"
#include "base/immediate_crash.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "base/trace_event/base_tracing.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {
namespace allocator {

namespace {

#if defined(PA_ALLOW_PCSCAN)

#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
    internal::StatsCollector::ScannerId id) {
  switch (id) {
    case internal::StatsCollector::ScannerId::kClear:
      return "PCScan.Scanner.Clear";
    case internal::StatsCollector::ScannerId::kScan:
      return "PCScan.Scanner.Scan";
    case internal::StatsCollector::ScannerId::kSweep:
      return "PCScan.Scanner.Sweep";
    case internal::StatsCollector::ScannerId::kOverall:
      return "PCScan.Scanner";
    case internal::StatsCollector::ScannerId::kNumIds:
      __builtin_unreachable();
  }
}

constexpr const char* MutatorIdToTracingString(
    internal::StatsCollector::MutatorId id) {
  switch (id) {
    case internal::StatsCollector::MutatorId::kClear:
      return "PCScan.Mutator.Clear";
    case internal::StatsCollector::MutatorId::kScanStack:
      return "PCScan.Mutator.ScanStack";
    case internal::StatsCollector::MutatorId::kScan:
      return "PCScan.Mutator.Scan";
    case internal::StatsCollector::MutatorId::kOverall:
      return "PCScan.Mutator";
    case internal::StatsCollector::MutatorId::kNumIds:
      __builtin_unreachable();
  }
}
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
 public:
  void ReportTraceEvent(internal::StatsCollector::ScannerId id,
                        [[maybe_unused]] uint32_t tid,
                        int64_t start_time_ticks_internal_value,
                        int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
    // TRACE_EVENT_* macros below drop most parameters when tracing is
    // disabled at compile time.
    const char* tracing_id = ScannerIdToTracingString(id);
    const TimeTicks start_time =
        TimeTicks::FromInternalValue(start_time_ticks_internal_value);
    const TimeTicks end_time =
        TimeTicks::FromInternalValue(end_time_ticks_internal_value);
    TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
                      perfetto::ThreadTrack::ForThread(tid), start_time);
    TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
                    end_time);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
  }

  void ReportTraceEvent(internal::StatsCollector::MutatorId id,
                        [[maybe_unused]] uint32_t tid,
                        int64_t start_time_ticks_internal_value,
                        int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
    // TRACE_EVENT_* macros below drop most parameters when tracing is
    // disabled at compile time.
    const char* tracing_id = MutatorIdToTracingString(id);
    const TimeTicks start_time =
        TimeTicks::FromInternalValue(start_time_ticks_internal_value);
    const TimeTicks end_time =
        TimeTicks::FromInternalValue(end_time_ticks_internal_value);
    TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
                      perfetto::ThreadTrack::ForThread(tid), start_time);
    TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
                    end_time);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
  }

  void ReportSurvivedQuarantineSize(size_t survived_size) override {
    TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantineSize",
                   survived_size);
  }

  void ReportSurvivedQuarantinePercent(double survived_rate) override {
    // Multiply by 1000 since TRACE_COUNTER1 expects integer. In catapult,
    // divide back.
    // TODO(bikineev): Remove after switching to perfetto.
    TRACE_COUNTER1(kTraceCategory, "PCScan.SurvivedQuarantinePercent",
                   1000 * survived_rate);
  }

  void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
    TimeDelta sample = Microseconds(sample_in_usec);
    UmaHistogramTimes(stats_name, sample);
  }

 private:
  static constexpr char kTraceCategory[] = "partition_alloc";
};

#endif  // defined(PA_ALLOW_PCSCAN)

}  // namespace

#if defined(PA_ALLOW_PCSCAN)
void RegisterPCScanStatsReporter() {
  static StatsReporterImpl s_reporter;
  static bool registered = false;

  DCHECK(!registered);

  internal::PCScan::RegisterStatsReporter(&s_reporter);
  registered = true;
}
#endif  // defined(PA_ALLOW_PCSCAN)

namespace {

void RunThreadCachePeriodicPurge() {
  // Micros, since periodic purge should typically take at most a few ms.
  SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
  TRACE_EVENT0("memory", "PeriodicPurge");
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  instance.RunPeriodicPurge();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}

void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
  TRACE_EVENT0("base", "partition_alloc::MemoryReclaimer::Reclaim()");
  auto* instance = ::partition_alloc::MemoryReclaimer::Instance();

  {
    // Micros, since memory reclaiming should typically take at most a few ms.
    SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.MemoryReclaim");
    instance->ReclaimNormal();
  }

  TimeDelta delay =
      Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
  task_runner->PostDelayedTask(
      FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}

}  // namespace

void StartThreadCachePeriodicPurge() {
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, BindOnce(RunThreadCachePeriodicPurge), delay);
}

void StartMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
  // Can be called several times.
  static bool is_memory_reclaimer_running = false;
  if (is_memory_reclaimer_running)
    return;
  is_memory_reclaimer_running = true;

  // The caller of the API fully controls where the reclaim runs.
  // However there are a few reasons to recommend that the caller runs
  // it on the main thread:
  // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
  //   is more likely in cache when executing on the main thread.
  // - Memory reclaim takes the partition lock for each partition. As a
  //   consequence, while reclaim is running, the main thread is unlikely to be
  //   able to make progress, as it would be waiting on the lock.
  // - Finally, this runs in idle time only, so there should be no visible
  //   impact.
  //
  // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
  // seconds is useful. Since this is meant to run during idle time only, it is
  // a reasonable starting point balancing effectiveness vs cost. See
  // crbug.com/942512 for details and experimental results.
  auto* instance = ::partition_alloc::MemoryReclaimer::Instance();
  TimeDelta delay =
      Microseconds(instance->GetRecommendedReclaimIntervalInMicroseconds());
  task_runner->PostDelayedTask(
      FROM_HERE, BindOnce(RunMemoryReclaimer, task_runner), delay);
}

std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
  std::map<std::string, std::string> trials;

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // BackupRefPtr_Effective and PCScan_Effective record whether or not
  // BackupRefPtr and/or PCScan are enabled. The experiments aren't independent,
  // so having a synthetic Finch will help look only at cases where one isn't
  // affected by the other.

  // Whether PartitionAllocBackupRefPtr is enabled (as determined by
  // FeatureList::IsEnabled).
  [[maybe_unused]] bool brp_finch_enabled = false;
  // Whether PartitionAllocBackupRefPtr is set up for the default behavior. The
  // default behavior is when either the Finch flag is disabled, or is enabled
  // in brp-mode=disabled (these two options are equivalent).
  [[maybe_unused]] bool brp_nondefault_behavior = false;
  // Whether PartitionAllocBackupRefPtr is set up to enable BRP protection. It
  // requires the Finch flag to be enabled and brp-mode!=disabled*. Some modes,
  // e.g. disabled-but-3-way-split, do something (hence can't be considered the
  // default behavior), but don't enable BRP protection.
  [[maybe_unused]] bool brp_truly_enabled = false;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (FeatureList::IsEnabled(features::kPartitionAllocBackupRefPtr))
    brp_finch_enabled = true;
  if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() !=
                               features::BackupRefPtrMode::kDisabled)
    brp_nondefault_behavior = true;
  if (brp_finch_enabled && features::kBackupRefPtrModeParam.Get() ==
                               features::BackupRefPtrMode::kEnabled)
    brp_truly_enabled = true;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  [[maybe_unused]] bool pcscan_enabled =
#if defined(PA_ALLOW_PCSCAN)
      FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else
      false;
#endif

  std::string brp_group_name = "Unavailable";
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (pcscan_enabled) {
    // If PCScan is enabled, just ignore the population.
    brp_group_name = "Ignore_PCScanIsOn";
  } else if (!brp_finch_enabled) {
    // The control group is actually disguised as "enabled", but in fact it's
    // disabled using a param. This is to differentiate the population that
    // participates in the control group, from the population that isn't in any
    // group.
    brp_group_name = "Ignore_NoGroup";
  } else {
    switch (features::kBackupRefPtrModeParam.Get()) {
      case features::BackupRefPtrMode::kDisabled:
        brp_group_name = "Disabled";
        break;
      case features::BackupRefPtrMode::kEnabled:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
        brp_group_name = "EnabledPrevSlot";
#else
        brp_group_name = "EnabledBeforeAlloc";
#endif
        break;
      case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
        brp_group_name = "DisabledBut2WaySplit";
        break;
      case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
        brp_group_name = "DisabledBut3WaySplit";
        break;
    }

    if (features::kBackupRefPtrModeParam.Get() !=
        features::BackupRefPtrMode::kDisabled) {
      std::string process_selector;
      switch (features::kBackupRefPtrEnabledProcessesParam.Get()) {
        case features::BackupRefPtrEnabledProcesses::kBrowserOnly:
          process_selector = "BrowserOnly";
          break;
        case features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
          process_selector = "BrowserAndRenderer";
          break;
        case features::BackupRefPtrEnabledProcesses::kNonRenderer:
          process_selector = "NonRenderer";
          break;
        case features::BackupRefPtrEnabledProcesses::kAllProcesses:
          process_selector = "AllProcesses";
          break;
      }

      brp_group_name += ("_" + process_selector);
    }
  }
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  trials.emplace("BackupRefPtr_Effective", brp_group_name);

  // On 32-bit architectures, PCScan is not supported and permanently disabled.
  // Don't lump it into "Disabled", so that belonging to "Enabled"/"Disabled" is
  // fully controlled by Finch and thus have identical population sizes.
  std::string pcscan_group_name = "Unavailable";
  std::string pcscan_group_name_fallback = "Unavailable";
#if defined(PA_ALLOW_PCSCAN)
  if (brp_truly_enabled) {
    // If BRP protection is enabled, just ignore the population. Check
    // brp_truly_enabled, not brp_finch_enabled, because there are certain modes
    // where BRP protection is actually disabled.
    pcscan_group_name = "Ignore_BRPIsOn";
  } else {
    pcscan_group_name = (pcscan_enabled ? "Enabled" : "Disabled");
  }
  // In case we are incorrect that PCScan is independent of partition-split
  // modes, create a fallback trial that only takes into account the BRP Finch
  // settings that preserve the default behavior.
  if (brp_nondefault_behavior) {
    pcscan_group_name_fallback = "Ignore_BRPIsOn";
  } else {
    pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
  }
#endif  // defined(PA_ALLOW_PCSCAN)
  trials.emplace("PCScan_Effective", pcscan_group_name);
  trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  trials.emplace("FakeBinaryExperiment",
#if BUILDFLAG(USE_FAKE_BINARY_EXPERIMENT)
                 "Enabled"
#else
                 "Disabled"
#endif
  );

  return trials;
}

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

namespace {

internal::PartitionLock g_stack_trace_buffer_lock;

struct StackTraceWithID {
  debug::StackTrace stack_trace;
  uintptr_t id = 0;
};
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);

void DanglingRawPtrDetected(uintptr_t id) {
  // This is called from inside the allocator. No allocation is allowed.

  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);

#if DCHECK_IS_ON()
  for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer)
    PA_DCHECK(!entry || entry->id != id);
#endif  // DCHECK_IS_ON()

  for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
    if (!entry) {
      entry = {debug::StackTrace(), id};
      return;
    }
  }

  // The StackTrace hasn't been recorded, because the buffer isn't large
  // enough.
}

// From the StackTraces recorded in |DanglingRawPtrDetected|, extract the one
// whose id matches |id|. Return nullopt if not found.
absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
  for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
    if (entry && entry->id == id) {
      debug::StackTrace stack_trace = std::move(entry->stack_trace);
      entry = absl::nullopt;
      return stack_trace;
    }
  }
  return absl::nullopt;
}

// Extract from the StackTrace output the signature of the pertinent caller.
// This function is meant to be used only by Chromium developers, to list all
// the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
  LOG(ERROR) << stacktrace;
  std::vector<StringPiece> lines = SplitStringPiece(
      stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);

  // We are looking for the callers of the function releasing the raw_ptr and
  // freeing memory:
  const StringPiece callees[] = {
      "internal::BackupRefPtrImpl<>::ReleaseInternal()",
      "internal::PartitionFree()",
      "base::(anonymous namespace)::FreeFn()",
  };
  size_t caller_index = 0;
  for (size_t i = 0; i < lines.size(); ++i) {
    for (const auto& callee : callees) {
      if (lines[i].find(callee) != StringPiece::npos) {
        caller_index = i + 1;
      }
    }
  }
  if (caller_index >= lines.size()) {
    return "undefined";
  }
  StringPiece caller = lines[caller_index];

  // |caller| follows the following format:
  //
  //    #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()
  //    -- -------------- -----------------------------------------------
  // Depth        Address Function
  size_t address_start = caller.find(' ');
  size_t function_start = caller.find(' ', address_start + 1);

  if (address_start == caller.npos || function_start == caller.npos) {
    return "undefined";
  }

  return std::string(caller.substr(function_start + 1));
}

void DanglingRawPtrReleased(uintptr_t id) {
  // This is called from raw_ptr<>'s release operation. Making allocations is
  // allowed. In particular, symbolizing and printing the StackTraces may
  // allocate memory.

  debug::StackTrace stack_trace_release;
  absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);

  if (FeatureList::IsEnabled(features::kPartitionAllocDanglingPtrRecord)) {
    if (stack_trace_free) {
      LOG(ERROR) << StringPrintf(
          "[DanglingSignature]\t%s\t%s",
          ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
          ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
    } else {
      LOG(ERROR) << StringPrintf(
          "[DanglingSignature]\t%s\tmissing-stacktrace",
          ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
    }
    return;
  }

  if (stack_trace_free) {
    LOG(ERROR) << StringPrintf(
        "Detected dangling raw_ptr with id=0x%016" PRIxPTR
        ":\n\n"
        "The memory was freed at:\n%s\n"
        "The dangling raw_ptr was released at:\n%s",
        id, stack_trace_free->ToString().c_str(),
        stack_trace_release.ToString().c_str());
  } else {
    LOG(ERROR) << StringPrintf(
        "Detected dangling raw_ptr with id=0x%016" PRIxPTR
        ":\n\n"
        "It was not recorded where the memory was freed.\n\n"
        "The dangling raw_ptr was released at:\n%s",
        id, stack_trace_release.ToString().c_str());
  }
  IMMEDIATE_CRASH();
}

void ClearDanglingRawPtrBuffer() {
  internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
  g_stack_trace_buffer = DanglingRawPtrBuffer();
}

}  // namespace

void InstallDanglingRawPtrChecks() {
  // Clearing storage is useful for running multiple unit tests without
  // restarting the test executable.
  ClearDanglingRawPtrBuffer();

  partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
  partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleased);
}

// TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there
// is a dangling pointer, we should crash at some point. Consider providing an
// API to periodically check the buffer.

#else   // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void InstallDanglingRawPtrChecks() {}
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

}  // namespace allocator
}  // namespace base
42
src/base/allocator/partition_alloc_support.h
Normal file
@ -0,0 +1,42 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_

#include <map>
#include <string>

#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/memory/scoped_refptr.h"
#include "base/task/sequenced_task_runner.h"

namespace base {
namespace allocator {

#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif

// Starts a periodic timer on the current thread to purge all thread caches.
BASE_EXPORT void StartThreadCachePeriodicPurge();

BASE_EXPORT void StartMemoryReclaimer(
    scoped_refptr<SequencedTaskRunner> task_runner);

BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();

// Install handlers for when dangling raw_ptr(s) have been detected. This
// prints two StackTraces: one where the memory is freed, one where the last
// dangling raw_ptr stopped referencing it.
//
// This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOC_SUPPORT_H_
484
src/base/allocator/partition_allocator/BUILD.gn
Normal file
@ -0,0 +1,484 @@
# Copyright (c) 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/chromecast_build.gni")
import("//build/config/chromeos/ui_mode.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/logging.gni")

# Add partition_alloc.gni and import it for partition_alloc configs.

config("partition_alloc_implementation") {
  # See also: `partition_alloc_base/component_export.h`
  defines = [ "IS_PARTITION_ALLOC_IMPL" ]
}

config("memory_tagging") {
  if (current_cpu == "arm64" && is_clang &&
      (is_linux || is_chromeos || is_android || is_fuchsia)) {
    # base/ has access to the MTE intrinsics because it needs to use them,
    # but they're not backwards compatible. Use base::CPU::has_mte()
    # beforehand to confirm or use indirect functions (ifuncs) to select
    # an MTE-specific implementation at dynamic link-time.
    cflags = [
      "-Xclang",
      "-target-feature",
      "-Xclang",
      "+mte",
    ]
  }
}

if (is_fuchsia) {
  config("fuchsia_sync_lib") {
    libs = [
      "sync",  # Used by spinning_mutex.h.
    ]
  }
}

if (make_partition_alloc_standalone) {
  partition_alloc_target_type = "component"
} else {
  partition_alloc_target_type = "source_set"
}

target(partition_alloc_target_type, "partition_alloc") {
  sources = [
    "address_pool_manager.cc",
    "address_pool_manager.h",
    "address_pool_manager_bitmap.cc",
    "address_pool_manager_bitmap.h",
    "address_pool_manager_types.h",
    "address_space_randomization.cc",
    "address_space_randomization.h",
    "address_space_stats.h",
    "allocation_guard.cc",
    "allocation_guard.h",
    "dangling_raw_ptr_checks.cc",
    "dangling_raw_ptr_checks.h",
    "memory_reclaimer.cc",
    "memory_reclaimer.h",
    "oom.cc",
    "oom.h",
    "oom_callback.cc",
    "oom_callback.h",
    "page_allocator.cc",
    "page_allocator.h",
    "page_allocator_constants.h",
    "page_allocator_internal.h",
    "partition_address_space.cc",
    "partition_address_space.h",
    "partition_alloc-inl.h",
    "partition_alloc.cc",
    "partition_alloc.h",
    "partition_alloc_base/atomic_ref_count.h",
    "partition_alloc_base/bit_cast.h",
    "partition_alloc_base/bits.h",
    "partition_alloc_base/check.cc",
    "partition_alloc_base/check.h",
    "partition_alloc_base/compiler_specific.h",
    "partition_alloc_base/component_export.h",
    "partition_alloc_base/cpu.cc",
    "partition_alloc_base/cpu.h",
    "partition_alloc_base/cxx17_backports.h",
    "partition_alloc_base/debug/alias.cc",
    "partition_alloc_base/debug/alias.h",
    "partition_alloc_base/gtest_prod_util.h",
    "partition_alloc_base/immediate_crash.h",
    "partition_alloc_base/logging.cc",
    "partition_alloc_base/logging.h",
    "partition_alloc_base/memory/ref_counted.cc",
    "partition_alloc_base/memory/ref_counted.h",
    "partition_alloc_base/memory/scoped_policy.h",
    "partition_alloc_base/memory/scoped_refptr.h",
    "partition_alloc_base/migration_adapter.h",
    "partition_alloc_base/no_destructor.h",
    "partition_alloc_base/numerics/checked_math.h",
    "partition_alloc_base/numerics/checked_math_impl.h",
    "partition_alloc_base/numerics/clamped_math.h",
    "partition_alloc_base/numerics/clamped_math_impl.h",
    "partition_alloc_base/numerics/math_constants.h",
    "partition_alloc_base/numerics/ostream_operators.h",
    "partition_alloc_base/numerics/ranges.h",
    "partition_alloc_base/numerics/safe_conversions.h",
    "partition_alloc_base/numerics/safe_conversions_arm_impl.h",
    "partition_alloc_base/numerics/safe_conversions_impl.h",
    "partition_alloc_base/numerics/safe_math.h",
    "partition_alloc_base/numerics/safe_math_arm_impl.h",
    "partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
    "partition_alloc_base/numerics/safe_math_shared_impl.h",
    "partition_alloc_base/posix/eintr_wrapper.h",
    "partition_alloc_base/rand_util.cc",
    "partition_alloc_base/rand_util.h",
    "partition_alloc_base/scoped_clear_last_error.h",
    "partition_alloc_base/strings/stringprintf.cc",
    "partition_alloc_base/strings/stringprintf.h",
    "partition_alloc_base/sys_byteorder.h",
    "partition_alloc_base/thread_annotations.h",
    "partition_alloc_base/threading/platform_thread.cc",
    "partition_alloc_base/threading/platform_thread.h",
    "partition_alloc_base/threading/platform_thread_ref.h",
    "partition_alloc_base/time/time.cc",
    "partition_alloc_base/time/time.h",
    "partition_alloc_base/time/time_override.cc",
    "partition_alloc_base/time/time_override.h",
    "partition_alloc_base/win/windows_types.h",
    "partition_alloc_check.h",
    "partition_alloc_config.h",
    "partition_alloc_constants.h",
    "partition_alloc_forward.h",
    "partition_alloc_hooks.cc",
    "partition_alloc_hooks.h",
    "partition_alloc_notreached.h",
    "partition_bucket.cc",
    "partition_bucket.h",
    "partition_bucket_lookup.h",
    "partition_cookie.h",
    "partition_direct_map_extent.h",
    "partition_freelist_entry.h",
    "partition_lock.h",
    "partition_oom.cc",
    "partition_oom.h",
    "partition_page.cc",
    "partition_page.h",
    "partition_ref_count.h",
    "partition_root.cc",
    "partition_root.h",
    "partition_stats.cc",
    "partition_stats.h",
    "partition_tag.h",
    "partition_tag_bitmap.h",
    "partition_tls.h",
    "random.cc",
    "random.h",
    "reservation_offset_table.cc",
    "reservation_offset_table.h",
    "spinning_mutex.cc",
    "spinning_mutex.h",
    "starscan/logging.h",
    "starscan/metadata_allocator.cc",
    "starscan/metadata_allocator.h",
    "starscan/pcscan.cc",
    "starscan/pcscan.h",
    "starscan/pcscan_internal.cc",
    "starscan/pcscan_internal.h",
    "starscan/pcscan_scheduling.cc",
    "starscan/pcscan_scheduling.h",
    "starscan/raceful_worklist.h",
    "starscan/scan_loop.h",
    "starscan/snapshot.cc",
    "starscan/snapshot.h",
    "starscan/stack/stack.cc",
    "starscan/stack/stack.h",
    "starscan/starscan_fwd.h",
    "starscan/state_bitmap.h",
    "starscan/stats_collector.cc",
    "starscan/stats_collector.h",
    "starscan/stats_reporter.h",
    "starscan/write_protector.cc",
    "starscan/write_protector.h",
    "tagging.cc",
    "tagging.h",
    "thread_cache.cc",
    "thread_cache.h",
    "yield_processor.h",
  ]
  defines = []
  if (is_win) {
    sources += [
      "page_allocator_internals_win.h",
      "partition_alloc_base/rand_util_win.cc",
      "partition_alloc_base/scoped_clear_last_error_win.cc",
      "partition_alloc_base/threading/platform_thread_win.cc",
      "partition_alloc_base/time/time_win.cc",
      "partition_tls_win.cc",
    ]
  } else if (is_posix) {
    sources += [
      "page_allocator_internals_posix.cc",
      "page_allocator_internals_posix.h",
      "partition_alloc_base/files/file_util.h",
      "partition_alloc_base/files/file_util_posix.cc",
      "partition_alloc_base/posix/safe_strerror.cc",
      "partition_alloc_base/posix/safe_strerror.h",
      "partition_alloc_base/rand_util_posix.cc",
      "partition_alloc_base/threading/platform_thread_internal_posix.h",
      "partition_alloc_base/threading/platform_thread_posix.cc",
      "partition_alloc_base/time/time_conversion_posix.cc",
    ]

    if (is_android || is_chromeos_ash) {
      sources += [ "partition_alloc_base/time/time_android.cc" ]
    }
    if (is_apple) {
      sources += [ "partition_alloc_base/time/time_mac.mm" ]
    } else {
      sources += [ "partition_alloc_base/time/time_now_posix.cc" ]
    }
  } else if (is_fuchsia) {
    sources += [
      "page_allocator_internals_fuchsia.h",
      "partition_alloc_base/fuchsia/fuchsia_logging.cc",
      "partition_alloc_base/fuchsia/fuchsia_logging.h",
      "partition_alloc_base/posix/safe_strerror.cc",
      "partition_alloc_base/posix/safe_strerror.h",
      "partition_alloc_base/rand_util_fuchsia.cc",
      "partition_alloc_base/threading/platform_thread_internal_posix.h",
      "partition_alloc_base/threading/platform_thread_posix.cc",
      "partition_alloc_base/time/time_conversion_posix.cc",
      "partition_alloc_base/time/time_fuchsia.cc",
    ]
  }
  if (is_android) {
    # Only the Android build requires native_library, and native_library
    # depends on file_path, so file_path is added when is_android = true.
    sources += [
      "partition_alloc_base/files/file_path.cc",
      "partition_alloc_base/files/file_path.h",
      "partition_alloc_base/native_library.cc",
      "partition_alloc_base/native_library.h",
      "partition_alloc_base/native_library_posix.cc",
    ]
  }
  if (is_apple) {
    # Apple-specific utilities
    sources += [
      "partition_alloc_base/mac/foundation_util.h",
      "partition_alloc_base/mac/foundation_util.mm",
      "partition_alloc_base/mac/mac_util.h",
      "partition_alloc_base/mac/mac_util.mm",
      "partition_alloc_base/mac/scoped_cftyperef.h",
      "partition_alloc_base/mac/scoped_typeref.h",
    ]
  }
  if (current_cpu == "x64") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
  } else if (current_cpu == "x86") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
  } else if (current_cpu == "arm") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
  } else if (current_cpu == "arm64") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
  } else {
    # To support a trampoline for another arch, please refer to
    # v8/src/heap/base.
  }
  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":logging_buildflags",
    ":partition_alloc_buildflags",
    "//base:synchronization_buildflags",
    "//base:tracing_buildflags",
    "//build:branding_buildflags",
    "//build/config/compiler:compiler_buildflags",
  ]

  # TODO(https://crbug.com/1151236): Remove this dependency on Abseil once PA
  # no longer includes any headers directly from base/.
  deps = [ "//third_party/abseil-cpp:absl" ]
  configs += [
    ":partition_alloc_implementation",
    ":memory_tagging",
  ]
  public_configs = []
  if (is_android) {
    # tagging.cc requires __arm_mte_set_* functions.
    deps += [ "//third_party/android_ndk:cpu_features" ]
  }
  if (is_fuchsia) {
    public_deps += [
      "//third_party/fuchsia-sdk/sdk/pkg/fit",
      "//third_party/fuchsia-sdk/sdk/pkg/sync",
      "//third_party/fuchsia-sdk/sdk/pkg/zx",
    ]

    # Needed for users of spinning_mutex.h, which for performance reasons,
    # contains inlined calls to `libsync` inside the header file.
    # It appends an entry to the "libs" section of the dependent target.
    public_configs += [ ":fuchsia_sync_lib" ]
  }

  frameworks = []
  if (is_mac) {
    # SecTaskGetCodeSignStatus needs:
    frameworks += [ "Security.framework" ]
  }

  configs += [ "//build/config/compiler:wexit_time_destructors" ]

  # Partition alloc is relatively hot (>1% of cycles for users of CrOS). Use
  # speed-focused optimizations for it.
  if (!is_debug) {
    configs -= [ "//build/config/compiler:default_optimization" ]
    configs += [ "//build/config/compiler:optimize_speed" ]
  }
}

source_set("test_support") {
  sources = [
    "extended_api.cc",
    "extended_api.h",
    "partition_alloc_base/threading/platform_thread_for_testing.h",
  ]
  if (is_posix) {
    sources += [
      "partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
    ]
  }
  if (is_fuchsia) {
    sources += [
      "partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
      "partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
    ]
  }
  if (is_win) {
    sources +=
        [ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
  }
  if (is_mac || is_ios) {
    sources +=
        [ "partition_alloc_base/threading/platform_thread_mac_for_testing.mm" ]
  }
  if (is_linux || is_chromeos) {
    sources += [
      "partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
    ]
  }
  if (is_android) {
    sources += [
      "partition_alloc_base/threading/platform_thread_android_for_testing.cc",
    ]
  }
  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":logging_buildflags",
    ":partition_alloc_buildflags",
    "//base:synchronization_buildflags",
    "//base:tracing_buildflags",
    "//build:branding_buildflags",
    "//build/config/compiler:compiler_buildflags",
  ]
  public_configs = []
  if (is_fuchsia) {
    public_deps += [
      "//third_party/fuchsia-sdk/sdk/pkg/fit",
      "//third_party/fuchsia-sdk/sdk/pkg/sync",
      "//third_party/fuchsia-sdk/sdk/pkg/zx",
    ]

    # Needed for users of spinning_mutex.h, which for performance reasons,
    # contains inlined calls to `libsync` inside the header file.
    # It appends an entry to the "libs" section of the dependent target.
    public_configs += [ ":fuchsia_sync_lib" ]
  }
  deps = [ "//base:base" ]

  if (!is_debug) {
    configs -= [ "//build/config/compiler:default_optimization" ]
    configs += [ "//build/config/compiler:optimize_speed" ]
  }
}

buildflag_header("partition_alloc_buildflags") {
  header = "partition_alloc_buildflags.h"

  _use_partition_alloc_as_malloc = use_allocator == "partition"
  assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
         "Partition alloc requires the allocator shim")

  # BackupRefPtr(BRP) build flags.
  _use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
  _put_ref_count_in_previous_slot =
      put_ref_count_in_previous_slot && _use_backup_ref_ptr
  _enable_backup_ref_ptr_slow_checks =
      enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr
  _enable_dangling_raw_ptr_checks =
      enable_dangling_raw_ptr_checks && _use_backup_ref_ptr

  # MTECheckedPtr is exclusive against BRP (asserted at declaration).
  # MTECheckedPtr requires 64-bit pointers (not available in NaCl).
  _use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl

  _record_alloc_info = false

  # TODO(crbug.com/1151236): Need to refactor the following buildflags.
  # The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
  # partition alloc. For partition alloc,
  # gen/base/allocator/partition_allocator/partition_alloc_buildflags.h
  # defines and partition alloc includes the header file. For chrome,
  # gen/base/allocator/buildflags.h defines and chrome includes.
  flags = [
    "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",

    "USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
    "ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",

    "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",

    "RECORD_ALLOC_INFO=$_record_alloc_info",
  ]
}

buildflag_header("chromecast_buildflags") {
  header = "chromecast_buildflags.h"

  flags = [
    "PA_IS_CAST_ANDROID=$is_cast_android",
    "PA_IS_CASTOS=$is_castos",
  ]
}

buildflag_header("chromeos_buildflags") {
  header = "chromeos_buildflags.h"

  flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ]
}

buildflag_header("logging_buildflags") {
  header = "logging_buildflags.h"

  flags = [ "PA_ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached" ]
}

buildflag_header("debugging_buildflags") {
  header = "debugging_buildflags.h"
  header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug"

  # Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`,
  # but avails it as a buildflag.
  _dcheck_is_on = is_debug || dcheck_always_on

  flags = [
    "PA_DCHECK_IS_ON=$_dcheck_is_on",
    "PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
  ]
}

group("buildflags") {
  public_deps = [
    ":chromecast_buildflags",
    ":chromeos_buildflags",
    ":debugging_buildflags",
    ":logging_buildflags",
    ":partition_alloc_buildflags",
  ]
}

# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").
30
src/base/allocator/partition_allocator/DEPS
Normal file
@ -0,0 +1,30 @@
# PartitionAlloc is planned to be extracted into a standalone library, and
# therefore dependencies need to be strictly controlled and minimized.

noparent = True

include_rules = [
  "+build/build_config.h",
  "+build/buildflag.h",
  "+third_party/lss/linux_syscall_support.h",
]

specific_include_rules = {
  ".*_(perf|unit)test\.cc$": [
    "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
    "+base/debug/proc_maps_linux.h",
    "+base/system/sys_info.h",
    "+base/test/gtest_util.h",
    "+base/timer/lap_timer.h",
    "+base/win/windows_version.h",
    "+testing/gmock/include/gmock/gmock.h",
    "+testing/gtest/include/gtest/gtest.h",
    "+testing/perf/perf_result_reporter.h",
  ],
  "extended_api\.cc$": [
    "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
  ],
  "gtest_prod_util\.h$": [
    "+testing/gtest/include/gtest/gtest_prod.h",
  ],
}
6
src/base/allocator/partition_allocator/DIR_METADATA
Normal file
@ -0,0 +1,6 @@
monorail {
  component: "Blink>MemoryAllocator>Partition"
}

# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"
3
src/base/allocator/partition_allocator/OWNERS
Normal file
@ -0,0 +1,3 @@
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org
180
src/base/allocator/partition_allocator/PartitionAlloc.md
Normal file
@ -0,0 +1,180 @@
# PartitionAlloc Design

This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.

## Overview

PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.

*** note
This document largely avoids defining terms; consult the
[glossary](./glossary.md) for a complete reference.
***

### Performance

PartitionAlloc is designed to be extremely fast in its fast paths. The fast
paths of allocation and deallocation require very few (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.

However, even the fast path isn't the fastest, because it requires taking
a per-partition lock. Although we optimized the lock, there was still room for
improvement; to this end, we introduced the thread cache.
The thread cache has been tailored to satisfy a vast majority of requests by
allocating from and releasing memory to the main allocator in batches,
amortizing lock acquisition and further improving locality while not trapping
excess memory.
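
The batching idea described above can be illustrated with a minimal sketch
(illustrative only; this is not PartitionAlloc's actual `ThreadCache`, and the
class, batch sizes, and `CentralAllocator` here are invented for the example):

```cpp
// Illustrative sketch: a per-thread cache that refills from, and flushes to,
// a locked central allocator in batches, so the central lock is taken once
// per batch instead of once per allocation.
#include <cstddef>
#include <mutex>
#include <vector>

struct CentralAllocator {
  std::mutex lock;
  std::vector<void*> free_slots;  // Free slots of a single size class.
};

class ThreadCacheSketch {
 public:
  explicit ThreadCacheSketch(CentralAllocator& central) : central_(central) {}

  void* Allocate() {
    if (slots_.empty())
      Refill(/*batch_size=*/16);  // One lock acquisition per 16 allocations.
    if (slots_.empty())
      return nullptr;  // A real allocator would grow the central pool here.
    void* slot = slots_.back();
    slots_.pop_back();
    return slot;
  }

  void Free(void* slot) {
    slots_.push_back(slot);
    // Cap the cache so it doesn't trap excess memory.
    if (slots_.size() > 32)
      Flush(/*keep=*/16);
  }

 private:
  void Refill(size_t batch_size) {
    std::lock_guard<std::mutex> guard(central_.lock);
    for (size_t i = 0; i < batch_size && !central_.free_slots.empty(); ++i) {
      slots_.push_back(central_.free_slots.back());
      central_.free_slots.pop_back();
    }
  }

  void Flush(size_t keep) {
    std::lock_guard<std::mutex> guard(central_.lock);
    while (slots_.size() > keep) {
      central_.free_slots.push_back(slots_.back());
      slots_.pop_back();
    }
  }

  CentralAllocator& central_;
  std::vector<void*> slots_;  // Thread-local; accessed without locking.
};
```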

### Security

Security is one of the important goals of PartitionAlloc.

PartitionAlloc guarantees that different partitions exist in different regions
of the process's address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.

Similarly, one page can contain only objects from the same bucket.
When freed, PartitionAlloc returns the physical memory, but continues to
reserve the region for this very bucket.

The above techniques help avoid type confusion attacks. Note, however, that
these apply only to normal buckets and not to direct map, as it'd waste too
much address space.

PartitionAlloc also guarantees that:

* Linear overflows/underflows cannot corrupt into, out of, or between
  partitions. There are guard pages at the beginning and the end of each memory
  region owned by a partition.

* Linear overflows/underflows cannot corrupt the allocation metadata.
  PartitionAlloc records metadata in a dedicated, out-of-line region (not
  adjacent to objects), surrounded by guard pages. (Freelist pointers are an
  exception.)

* Partial pointer overwrite of a freelist pointer should fault.

* Direct map allocations have guard pages at the beginning and the end.

### Alignment

PartitionAlloc guarantees that returned pointers are aligned on a
`partition_alloc::internal::kAlignment` boundary (typically 16B on
64-bit systems, and 8B on 32-bit).

PartitionAlloc also supports higher levels of alignment, which can be requested
via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
`posix_memalign()`). The requested
alignment has to be a power of two. PartitionAlloc reserves the right to round
up the requested size to the nearest power of two, greater than or equal to the
requested alignment. This may be wasteful, but allows taking advantage of
natural PartitionAlloc alignment guarantees. Allocations with an alignment
requirement greater than `partition_alloc::internal::kAlignment` are expected
to be very rare.
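
For illustration, a caller outside the allocator can request over-aligned
memory through `posix_memalign()`, one of the platform-specific APIs mentioned
above (a minimal sketch for POSIX systems; the alignment and size values are
arbitrary):

```cpp
// Sketch: requesting 64-byte-aligned memory via posix_memalign() (POSIX).
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  void* ptr = nullptr;
  // The alignment must be a power of two (and a multiple of sizeof(void*)).
  if (posix_memalign(&ptr, /*alignment=*/64, /*size=*/100) != 0)
    return 1;
  printf("aligned: %d\n",
         static_cast<int>(reinterpret_cast<uintptr_t>(ptr) % 64 == 0));
  free(ptr);
  return 0;
}
```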

## Architecture

### Layout in Memory

PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
pages. Each super page is split into partition pages.
The first and the last partition page are permanently inaccessible and serve
as guard pages, with the exception of one system page in the middle of the
first partition page that holds metadata (32B struct per partition page).

![anatomy of a super page](./dot/super-page.png)

* The slot span numbers provide a visual hint of their size (in partition
  pages).
* Colors provide a visual hint of the bucket to which the slot span belongs.
    * Although only five colors are shown, in reality, a super page holds
      tens of slot spans, some of which belong to the same bucket.
* The system page that holds metadata tracks each partition page with one 32B
  [`PartitionPage` struct][PartitionPage], which is either
    * a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or
    * a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the
      diagram).
* Gray fill denotes guard pages (one partition page each at the head and tail
  of each super page).

As allocation requests arrive, there is eventually a need to allocate a new
slot span.
Address space for such a slot span is carved out from the last super page. If
there is not enough space, a new super page is allocated. Due to the varying
sizes of slot spans, this may lead to leaving space unused (we never go back to
fill previous super pages), which is fine because this memory is merely
reserved, which is far less precious than committed memory. Note also that
address space reserved for a slot span is never released, even if the slot span
isn't used for a long time.

All slots in a newly allocated slot span are *free*, i.e. available for
allocation.
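
As a rough illustration of the layout arithmetic described above (a sketch
with typical 64-bit values hard-coded; the real constants live in
`partition_alloc_constants.h` and vary by platform):

```cpp
// Sketch: how many partition pages fit in a super page, and how many remain
// usable once the head and tail guard partition pages are subtracted.
#include <cstddef>
#include <cstdio>

int main() {
  constexpr size_t kSuperPageSize = 2 * 1024 * 1024;  // 2MiB reservation.
  constexpr size_t kPartitionPageSize = 16 * 1024;    // 16KiB (typical).
  constexpr size_t kPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;

  // The first and last partition pages are permanently inaccessible guards;
  // the metadata system page lives inside the first partition page.
  constexpr size_t kUsablePages = kPagesPerSuperPage - 2;

  printf("partition pages per super page: %zu (usable: %zu)\n",
         kPagesPerSuperPage, kUsablePages);  // Prints: 128 (usable: 126)
  return 0;
}
```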

### Freelist Pointers

All free slots within a slot span are chained into a singly-linked free-list,
by writing the *next* pointer at the beginning of each slot, and the head of
the list is written in the metadata struct.

However, writing a pointer in each free slot of a newly allocated span would
require committing and faulting in physical pages upfront, which would be
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
Only provisioned slots are chained into the freelist.
Once the provisioned slots in a span are depleted, another page's worth of
slots is provisioned (note, a slot that crosses a page boundary only gets
provisioned with slots of the next page). See
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.

Freelist pointers are stored at the beginning of each free slot. As such, they
are the only metadata that is inline, i.e. stored among the
objects. This makes them prone to overruns. On little-endian systems, the
pointers are encoded by reversing byte order, so that partial overruns will
very likely result in destroying the pointer, as opposed to forming a valid
pointer to a nearby location.

Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a
different manner. This helps PartitionAlloc detect corruptions.
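
A minimal sketch of the byte-order-reversal idea (this is not PartitionAlloc's
actual encoding code; `ByteSwap` and the constants are illustrative, and a
64-bit little-endian platform with GCC/Clang builtins is assumed):

```cpp
// Sketch: storing a freelist "next" pointer with its byte order reversed. A
// linear overrun clobbers the first (lowest-address) bytes of the stored
// value; after decoding, those become the pointer's *high* bytes, yielding a
// non-canonical address that faults instead of aliasing a nearby object.
#include <cstdint>
#include <cstdio>

uintptr_t ByteSwap(uintptr_t x) {
  return __builtin_bswap64(x);  // Assumes 64-bit, GCC/Clang.
}

int main() {
  uintptr_t next = 0x00007f1234567890;  // A plausible user-space pointer.
  uintptr_t encoded = ByteSwap(next);   // What would be written in the slot.
  printf("round-trips: %d\n", static_cast<int>(ByteSwap(encoded) == next));

  // Simulate a partial overrun of the two lowest-address bytes of the slot
  // (on little-endian, these are the low bits of the stored integer).
  uintptr_t clobbered = (encoded & ~uintptr_t{0xffff}) | 0x4141;
  printf("decoded after overrun: 0x%016llx\n",  // 0x4141... is non-canonical.
         static_cast<unsigned long long>(ByteSwap(clobbered)));
  return 0;
}
```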

### Slot Span States

A slot span can be in any of 4 states:
* *Full*. A full span has no free slots.
* *Empty*. An empty span has no allocated slots, only free slots.
* *Active*. An active span is anything in between the above two.
* *Decommitted*. A decommitted span is a special case of an empty span, where
  all pages are decommitted from memory.

PartitionAlloc prioritizes getting an available slot from an active span over
an empty one, in the hope that the latter can soon be transitioned into a
decommitted state, thus releasing memory. There is no mechanism, however, to
prioritize selection of a slot span based on the number of already allocated
slots.

An empty span becomes decommitted either when there are too many empty spans
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in
low memory pressure conditions). An allocation can be satisfied from
a decommitted span if there are no active or empty spans available. The slot
provisioning mechanism kicks back in, committing the pages gradually as needed,
and the span becomes active. (There is currently no other way
to unprovision slots than decommitting the entire span.)

As mentioned above, a bucket is a collection of slot spans containing slots of
the same size. In fact, each bucket has three linked lists, chaining active,
empty and decommitted spans (see `PartitionBucket::*_slot_spans_head`).
There is no need for a full span list. The lists are updated lazily. An empty,
decommitted or full span may stay on the active list for some time, until
`PartitionBucket::SetNewActiveSlotSpan()` encounters it.
A decommitted span may stay on the empty list for some time,
until `PartitionBucket<thread_safe>::SlowPathAlloc()` encounters it. However,
the inaccuracy can't happen in the other direction, i.e. an active span can
only be on the active list, and an empty span can only be on the active or
empty list.
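
The four states and the classification rules above can be summarized in a
small sketch (illustrative; PartitionAlloc derives this from its slot span
metadata, and the field and function names here are invented):

```cpp
// Sketch: classifying a slot span from its counters, mirroring the states
// described above.
#include <cstddef>

enum class SlotSpanState { kFull, kActive, kEmpty, kDecommitted };

struct SlotSpanSketch {
  size_t num_slots;            // Capacity of the span.
  size_t num_allocated_slots;  // Slots currently handed out.
  bool pages_committed;        // Whether the backing pages are committed.
};

SlotSpanState Classify(const SlotSpanSketch& span) {
  if (span.num_allocated_slots == 0)
    return span.pages_committed ? SlotSpanState::kEmpty
                                : SlotSpanState::kDecommitted;
  if (span.num_allocated_slots == span.num_slots)
    return SlotSpanState::kFull;
  return SlotSpanState::kActive;  // Anything in between full and empty.
}
```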

[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
541
src/base/allocator/partition_allocator/address_pool_manager.cc
Normal file
@ -0,0 +1,541 @@
|
|||||||
|
// Copyright 2020 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_pool_manager.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_space_stats.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
|
||||||
|
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_APPLE)
|
||||||
|
#include <sys/mman.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc::internal {
|
||||||
|
|
||||||
|
AddressPoolManager AddressPoolManager::singleton_;
|
||||||
|
|
||||||
|
// static
|
||||||
|
AddressPoolManager& AddressPoolManager::GetInstance() {
|
||||||
|
return singleton_;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// This will crash if the range cannot be decommitted.
|
||||||
|
void DecommitPages(uintptr_t address, size_t size) {
|
||||||
|
// Callers rely on the pages being zero-initialized when recommitting them.
|
||||||
|
// |DecommitSystemPages| doesn't guarantee this on all operating systems, in
|
||||||
|
// particular on macOS, but |DecommitAndZeroSystemPages| does.
|
||||||
|
DecommitAndZeroSystemPages(address, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
|
||||||
|
PA_DCHECK(!(ptr & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
|
||||||
|
|
||||||
|
for (pool_handle i = 0; i < std::size(pools_); ++i) {
|
||||||
|
if (!pools_[i].IsInitialized()) {
|
||||||
|
pools_[i].Initialize(ptr, length);
|
||||||
|
return i + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
PA_NOTREACHED();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::GetPoolUsedSuperPages(
|
||||||
|
pool_handle handle,
|
||||||
|
std::bitset<kMaxSuperPagesInPool>& used) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
if (!pool)
|
||||||
|
return;
|
||||||
|
|
||||||
|
pool->GetUsedSuperPages(used);
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
if (!pool)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return pool->GetBaseAddress();
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::ResetForTesting() {
|
||||||
|
for (pool_handle i = 0; i < std::size(pools_); ++i)
|
||||||
|
pools_[i].Reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Remove(pool_handle handle) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
PA_DCHECK(pool->IsInitialized());
|
||||||
|
pool->Reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
|
||||||
|
uintptr_t requested_address,
|
||||||
|
size_t length) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
if (!requested_address)
|
||||||
|
return pool->FindChunk(length);
|
||||||
|
const bool is_available = pool->TryReserveChunk(requested_address, length);
|
||||||
|
if (is_available)
|
||||||
|
return requested_address;
|
||||||
|
return pool->FindChunk(length);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length) {
|
||||||
|
PA_DCHECK(0 < handle && handle <= kNumPools);
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
PA_DCHECK(pool->IsInitialized());
|
||||||
|
DecommitPages(address, length);
|
||||||
|
pool->FreeChunk(address, length);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
|
||||||
|
PA_CHECK(ptr != 0);
|
||||||
|
PA_CHECK(!(ptr & kSuperPageOffsetMask));
|
||||||
|
PA_CHECK(!(length & kSuperPageOffsetMask));
|
||||||
|
address_begin_ = ptr;
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
address_end_ = ptr + length;
|
||||||
|
PA_DCHECK(address_begin_ < address_end_);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
total_bits_ = length / kSuperPageSize;
|
||||||
|
PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);
|
||||||
|
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
alloc_bitset_.reset();
|
||||||
|
bit_hint_ = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AddressPoolManager::Pool::IsInitialized() {
|
||||||
|
return address_begin_ != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::Reset() {
|
||||||
|
address_begin_ = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::GetUsedSuperPages(
|
||||||
|
std::bitset<kMaxSuperPagesInPool>& used) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(IsInitialized());
|
||||||
|
used = alloc_bitset_;
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
|
||||||
|
PA_DCHECK(IsInitialized());
|
||||||
|
return address_begin_;
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
|
||||||
|
const size_t need_bits = requested_size >> kSuperPageShift;
|
||||||
|
|
||||||
|
// Use first-fit policy to find an available chunk from free chunks. Start
|
||||||
|
// from |bit_hint_|, because we know there are no free chunks before.
|
||||||
|
size_t beg_bit = bit_hint_;
|
||||||
|
size_t curr_bit = bit_hint_;
|
||||||
|
while (true) {
|
||||||
|
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
|
||||||
|
// |total_bits_|, return |nullptr| to signal no free chunk was found.
|
||||||
|
size_t end_bit = beg_bit + need_bits;
|
||||||
|
if (end_bit > total_bits_)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
bool found = true;
|
||||||
|
for (; curr_bit < end_bit; ++curr_bit) {
|
||||||
|
if (alloc_bitset_.test(curr_bit)) {
|
||||||
|
// The bit was set, so this chunk isn't entirely free. Set |found=false|
|
||||||
|
// to ensure the outer loop continues. However, continue the inner loop
|
||||||
|
// to set |beg_bit| just past the last set bit in the investigated
|
||||||
|
// chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
|
||||||
|
// next outer loop pass from checking the same bits.
|
||||||
|
beg_bit = curr_bit + 1;
|
||||||
|
found = false;
|
||||||
|
if (bit_hint_ == curr_bit)
|
||||||
|
++bit_hint_;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
|
||||||
|
// mark as allocated) and return the allocated address.
|
||||||
|
if (found) {
|
||||||
|
for (size_t i = beg_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(!alloc_bitset_.test(i));
|
||||||
|
alloc_bitset_.set(i);
|
||||||
|
}
|
||||||
|
if (bit_hint_ == beg_bit) {
|
||||||
|
bit_hint_ = end_bit;
|
||||||
|
}
|
||||||
|
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
PA_DCHECK(address + requested_size <= address_end_);
|
||||||
|
#endif
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
PA_NOTREACHED();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
|
||||||
|
size_t requested_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
PA_DCHECK(!(address & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
|
||||||
|
const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
|
||||||
|
const size_t need_bits = requested_size / kSuperPageSize;
|
||||||
|
const size_t end_bit = begin_bit + need_bits;
|
||||||
|
// Check that requested address is not too high.
|
||||||
|
if (end_bit > total_bits_)
|
||||||
|
return false;
|
||||||
|
// Check if any bit of the requested region is set already.
|
||||||
|
for (size_t i = begin_bit; i < end_bit; ++i) {
|
||||||
|
if (alloc_bitset_.test(i))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// Otherwise, set the bits.
|
||||||
|
for (size_t i = begin_bit; i < end_bit; ++i) {
|
||||||
|
alloc_bitset_.set(i);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
|
||||||
|
PA_DCHECK(!(address & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!(free_size & kSuperPageOffsetMask));
|
||||||
|
|
||||||
|
PA_DCHECK(address_begin_ <= address);
|
||||||
|
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||||
|
PA_DCHECK(address + free_size <= address_end_);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
|
||||||
|
const size_t end_bit = beg_bit + free_size / kSuperPageSize;
|
||||||
|
for (size_t i = beg_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(alloc_bitset_.test(i));
|
||||||
|
alloc_bitset_.reset(i);
|
||||||
|
}
|
||||||
|
bit_hint_ = std::min(bit_hint_, beg_bit);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
|
||||||
|
std::bitset<kMaxSuperPagesInPool> pages;
|
||||||
|
size_t i;
|
||||||
|
{
|
||||||
|
ScopedGuard scoped_lock(lock_);
|
||||||
|
pages = alloc_bitset_;
|
||||||
|
i = bit_hint_;
|
||||||
|
}
|
||||||
|
|
||||||
|
stats->usage = pages.count();
|
||||||
|
|
||||||
|
size_t largest_run = 0;
|
||||||
|
size_t current_run = 0;
|
||||||
|
for (; i < total_bits_; ++i) {
|
||||||
|
if (!pages[i]) {
|
||||||
|
current_run += 1;
|
||||||
|
continue;
|
||||||
|
} else if (current_run > largest_run) {
|
||||||
|
largest_run = current_run;
|
||||||
|
}
|
||||||
|
current_run = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fell out of the loop with last bit being zero. Check once more.
|
||||||
|
if (current_run > largest_run) {
|
||||||
|
largest_run = current_run;
|
||||||
|
}
|
||||||
|
stats->largest_available_reservation = largest_run;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::GetPoolStats(const pool_handle handle,
|
||||||
|
PoolStats* stats) {
|
||||||
|
Pool* pool = GetPool(handle);
|
||||||
|
if (!pool->IsInitialized()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
pool->GetStats(stats);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
|
||||||
|
// Get 64-bit pool stats.
|
||||||
|
GetPoolStats(GetRegularPool(), &stats->regular_pool_stats);
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
GetPoolStats(GetBRPPool(), &stats->brp_pool_stats);
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
if (IsConfigurablePoolAvailable()) {
|
||||||
|
GetPoolStats(GetConfigurablePool(), &stats->configurable_pool_stats);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#else // defined(PA_HAS_64_BITS_POINTERS)
|
||||||
|
|
||||||
|
static_assert(
|
||||||
|
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
|
||||||
|
0,
|
||||||
|
"kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
|
||||||
|
static_assert(
|
||||||
|
kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
|
||||||
|
"kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
|
||||||
|
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
|
||||||
|
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
|
||||||
|
"kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
|
||||||
|
"kGuardOffsetOfBRPPoolBitmap.");
|
||||||
|
|
||||||
|
template <size_t bitsize>
|
||||||
|
void SetBitmap(std::bitset<bitsize>& bitmap,
|
||||||
|
size_t start_bit,
|
||||||
|
size_t bit_length) {
|
||||||
|
const size_t end_bit = start_bit + bit_length;
|
||||||
|
PA_DCHECK(start_bit <= bitsize);
|
||||||
|
PA_DCHECK(end_bit <= bitsize);
|
||||||
|
|
||||||
|
for (size_t i = start_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(!bitmap.test(i));
|
||||||
|
bitmap.set(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <size_t bitsize>
|
||||||
|
void ResetBitmap(std::bitset<bitsize>& bitmap,
|
||||||
|
size_t start_bit,
|
||||||
|
size_t bit_length) {
|
||||||
|
const size_t end_bit = start_bit + bit_length;
|
||||||
|
PA_DCHECK(start_bit <= bitsize);
|
||||||
|
PA_DCHECK(end_bit <= bitsize);
|
||||||
|
|
||||||
|
for (size_t i = start_bit; i < end_bit; ++i) {
|
||||||
|
PA_DCHECK(bitmap.test(i));
|
||||||
|
bitmap.reset(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
|
||||||
|
uintptr_t requested_address,
|
||||||
|
size_t length) {
|
||||||
|
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
|
||||||
|
uintptr_t address = AllocPages(requested_address, length, kSuperPageSize,
|
||||||
|
PageAccessibilityConfiguration::kInaccessible,
|
||||||
|
PageTag::kPartitionAlloc);
|
||||||
|
return address;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length) {
|
||||||
|
PA_DCHECK(!(address & kSuperPageOffsetMask));
|
||||||
|
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
|
||||||
|
FreePages(address, length);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::MarkUsed(pool_handle handle,
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length) {
|
||||||
|
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
|
||||||
|
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
if (handle == kBRPPoolHandle) {
|
||||||
|
PA_DCHECK(
|
||||||
|
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
|
||||||
|
|
||||||
|
// Make IsManagedByBRPPoolPool() return false when an address inside the
|
||||||
|
// first or the last PartitionPageSize()-bytes block is given:
|
||||||
|
//
|
||||||
|
// ------+---+---------------+---+----
|
||||||
|
// memory ..... | B | managed by PA | B | ...
|
||||||
|
// regions ------+---+---------------+---+----
|
||||||
|
//
|
||||||
|
// B: PartitionPageSize()-bytes block. This is used internally by the
|
||||||
|
// allocator and is not available for callers.
|
||||||
|
//
|
||||||
|
// This is required to avoid crash caused by the following code:
|
||||||
|
// {
|
||||||
|
// // Assume this allocation happens outside of PartitionAlloc.
|
||||||
|
// raw_ptr<T> ptr = new T[20];
|
||||||
|
// for (size_t i = 0; i < 20; i ++) { ptr++; }
|
||||||
|
// // |ptr| may point to an address inside 'B'.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Suppose that |ptr| points to an address inside B after the loop. If
|
||||||
|
// IsManagedByBRPPoolPool(ptr) were to return true, ~raw_ptr<T>() would
|
||||||
|
// crash, since the memory is not allocated by PartitionAlloc.
|
||||||
|
SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
|
||||||
|
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
|
||||||
|
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
|
||||||
|
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
|
||||||
|
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
|
||||||
|
} else
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
{
|
||||||
|
PA_DCHECK(handle == kRegularPoolHandle);
|
||||||
|
PA_DCHECK(
|
||||||
|
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
|
||||||
|
0);
|
||||||
|
SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
|
||||||
|
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
|
||||||
|
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddressPoolManager::MarkUnused(pool_handle handle,
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length) {
|
||||||
|
// Address regions allocated for normal buckets are never released, so this
|
||||||
|
// function can only be called for direct map. However, do not DCHECK on
|
||||||
|
// IsManagedByDirectMap(address), because many tests test this function using
|
||||||
|
// small allocations.
|
||||||
|
|
||||||
|
ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
|
||||||
|
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
|
||||||
|
#if BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
if (handle == kBRPPoolHandle) {
|
||||||
|
PA_DCHECK(
|
||||||
|
(length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);
|
||||||
|
|
||||||
|
// Make IsManagedByBRPPoolPool() return false when an address inside the
|
||||||
|
// first or the last PartitionPageSize()-bytes block is given.
|
||||||
|
// (See MarkUsed comment)
|
||||||
|
ResetBitmap(
|
||||||
|
AddressPoolManagerBitmap::brp_pool_bits_,
|
||||||
|
(address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
|
||||||
|
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
|
||||||
|
(length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
|
||||||
|
AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
|
||||||
|
} else
|
||||||
|
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||||
|
{
|
||||||
|
PA_DCHECK(handle == kRegularPoolHandle);
|
||||||
|
PA_DCHECK(
|
||||||
|
(length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
|
||||||
|
0);
|
||||||
|
ResetBitmap(
|
||||||
|
AddressPoolManagerBitmap::regular_pool_bits_,
|
||||||
|
address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
|
||||||
|
length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
|
||||||
|
}
|
||||||
|
}

void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

namespace {

// Counts super pages in use represented by `bitmap`.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  size_t bit_index = 0;

  // Stride over super pages.
  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
    // Stride over the bits comprising the super page.
    for (bit_index = super_page_index * bits_per_super_page;
         bit_index < (super_page_index + 1) * bits_per_super_page &&
         bit_index < bitsize;
         ++bit_index) {
      if (bitmap[bit_index]) {
        count += 1;
        // Move on to the next super page.
        break;
      }
    }
  }
  return count;
}

}  // namespace

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // Pool usage is read out from the address pool bitmaps.
  // The output stats are sized in super pages, so we interpret
  // the bitmaps into super page usage.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  // Get 32-bit pool usage.
  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Get blocklist size.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed))
      stats->blocklist_size += 1;
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  return true;
}

#endif  // defined(PA_HAS_64_BITS_POINTERS)

void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats)) {
    dumper->DumpStats(&stats);
  }
}

}  // namespace partition_alloc::internal
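
The guard-bit arithmetic in MarkUsed()/MarkUnused() above is easier to see in isolation. The sketch below reproduces the same bookkeeping with made-up constants (16 KiB tracked per bit, one guard page at each end); it is an illustration, not the real PartitionAlloc code, whose values come from PartitionPageShift() and friends.

// Standalone illustration of the BRP-pool guard arithmetic (hypothetical
// constants and names; not the real PartitionAlloc implementation).
#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace {

constexpr size_t kBitShift = 14;    // 16 KiB tracked per bit.
constexpr size_t kGuardOffset = 1;  // Skip the leading guard page.
constexpr size_t kGuardBits = 2;    // One guard page dropped at each end.
std::bitset<1 << 18> g_bits;        // 4 GiB of address space at 16 KiB/bit.

void MarkUsed(uintptr_t address, size_t length) {
  const size_t start = (address >> kBitShift) + kGuardOffset;
  const size_t count = (length >> kBitShift) - kGuardBits;
  for (size_t i = 0; i < count; ++i)
    g_bits.set(start + i);
}

}  // namespace

int main() {
  const uintptr_t kBase = 0x100000;
  MarkUsed(kBase, 16 * (size_t{1} << kBitShift));
  // The first and last pages of the reservation stay unmarked, so a pointer
  // one-past-the-end of a neighboring allocation is never misclassified.
  assert(!g_bits[kBase >> kBitShift]);         // Leading guard page.
  assert(g_bits[(kBase >> kBitShift) + 1]);    // First usable page.
  assert(!g_bits[(kBase >> kBitShift) + 15]);  // Trailing guard page.
  return 0;
}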
197
src/base/allocator/partition_allocator/address_pool_manager.h
Normal file
@ -0,0 +1,197 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

#include <bitset>
#include <limits>

#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"

namespace base {

template <typename Type>
struct LazyInstanceTraitsBase;

}  // namespace base

namespace partition_alloc {

class AddressSpaceStatsDumper;
struct AddressSpaceStats;
struct PoolStats;

}  // namespace partition_alloc

namespace partition_alloc::internal {

// (64bit version)
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 3 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAllocBRPPool and
// IsManagedByPartitionAllocRegularPool use the bitmaps to judge whether a given
// address is in a pool that supports BackupRefPtr or in a pool that doesn't.
// All PartitionAlloc allocations must be in either of the pools.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
 public:
  static AddressPoolManager& GetInstance();

  AddressPoolManager(const AddressPoolManager&) = delete;
  AddressPoolManager& operator=(const AddressPoolManager&) = delete;

#if defined(PA_HAS_64_BITS_POINTERS)
  pool_handle Add(uintptr_t address, size_t length);
  void Remove(pool_handle handle);

  // Populate a |used| bitset of superpages currently in use.
  void GetPoolUsedSuperPages(pool_handle handle,
                             std::bitset<kMaxSuperPagesInPool>& used);

  // Return the base address of a pool.
  uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif

  // Reserves address space from GigaCage.
  uintptr_t Reserve(pool_handle handle,
                    uintptr_t requested_address,
                    size_t length);

  // Frees address space back to GigaCage and decommits underlying system pages.
  void UnreserveAndDecommit(pool_handle handle,
                            uintptr_t address,
                            size_t length);
  void ResetForTesting();

#if !defined(PA_HAS_64_BITS_POINTERS)
  void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);

  static bool IsManagedByRegularPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByRegularPool(address);
  }

  static bool IsManagedByBRPPool(uintptr_t address) {
    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
  }
#endif  // !defined(PA_HAS_64_BITS_POINTERS)

  void DumpStats(AddressSpaceStatsDumper* dumper);

 private:
  friend class AddressPoolManagerForTesting;

  constexpr AddressPoolManager() = default;
  ~AddressPoolManager() = default;

  // Populates `stats` if applicable.
  // Returns whether `stats` was populated. (They might not be, e.g.
  // if PartitionAlloc is wholly unused in this process.)
  bool GetStats(AddressSpaceStats* stats);

#if defined(PA_HAS_64_BITS_POINTERS)
  class Pool {
   public:
    constexpr Pool() = default;
    ~Pool() = default;

    Pool(const Pool&) = delete;
    Pool& operator=(const Pool&) = delete;

    void Initialize(uintptr_t ptr, size_t length);
    bool IsInitialized();
    void Reset();

    uintptr_t FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

    bool TryReserveChunk(uintptr_t address, size_t size);

    void GetUsedSuperPages(std::bitset<kMaxSuperPagesInPool>& used);
    uintptr_t GetBaseAddress();

    void GetStats(PoolStats* stats);

   private:
    Lock lock_;

    // The bitset stores the allocation state of the address pool. 1 bit per
    // super-page: 1 = allocated, 0 = free.
    std::bitset<kMaxSuperPagesInPool> alloc_bitset_ PA_GUARDED_BY(lock_);

    // An index of a bit in the bitset before which we know for sure there are
    // all 1s. This is a best-effort hint in the sense that there still may be
    // lots of 1s after this index, but at least we know there is no point in
    // starting the search before it.
    size_t bit_hint_ PA_GUARDED_BY(lock_) = 0;

    size_t total_bits_ = 0;
    uintptr_t address_begin_ = 0;
#if BUILDFLAG(PA_DCHECK_IS_ON)
    uintptr_t address_end_ = 0;
#endif
  };

  PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
    PA_DCHECK(0 < handle && handle <= kNumPools);
    return &pools_[handle - 1];
  }

  // Gets the stats for the pool identified by `handle`, if
  // initialized.
  void GetPoolStats(pool_handle handle, PoolStats* stats);

  Pool pools_[kNumPools];

#endif  // defined(PA_HAS_64_BITS_POINTERS)

  static AddressPoolManager singleton_;

  friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
};

PA_ALWAYS_INLINE pool_handle GetRegularPool() {
  return kRegularPoolHandle;
}

PA_ALWAYS_INLINE pool_handle GetBRPPool() {
  return kBRPPoolHandle;
}

PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
  PA_DCHECK(IsConfigurablePoolAvailable());
  return kConfigurablePoolHandle;
}

}  // namespace partition_alloc::internal

namespace base::internal {

using ::partition_alloc::internal::AddressPoolManager;
using ::partition_alloc::internal::GetBRPPool;
using ::partition_alloc::internal::GetConfigurablePool;
using ::partition_alloc::internal::GetRegularPool;

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
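
For orientation, here is a minimal sketch of how a 64-bit caller might drive the interface above. Real call sites live inside PartitionAlloc itself; the `pa::kSuperPageSize` qualification and the two-super-page length are assumptions for the example.

// Hypothetical caller of AddressPoolManager (64-bit build assumed).
#include "base/allocator/partition_allocator/address_pool_manager.h"

namespace pa = partition_alloc::internal;

uintptr_t ReserveAndReleaseTwoSuperPages() {
  pa::AddressPoolManager& manager = pa::AddressPoolManager::GetInstance();
  const size_t length = 2 * pa::kSuperPageSize;
  // No address hint: let the pool pick any free chunk.
  uintptr_t region =
      manager.Reserve(pa::GetRegularPool(), /*requested_address=*/0, length);
  if (!region)
    return 0;
  // ... map and use the region, then hand it back and decommit its pages.
  manager.UnreserveAndDecommit(pa::GetRegularPool(), region, length);
  return region;
}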
@ -0,0 +1,37 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"

#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"

#if !defined(PA_HAS_64_BITS_POINTERS)

namespace partition_alloc::internal {

namespace {

Lock g_lock;

}  // namespace

Lock& AddressPoolManagerBitmap::GetLock() {
  return g_lock;
}

std::bitset<AddressPoolManagerBitmap::kRegularPoolBits>
    AddressPoolManagerBitmap::regular_pool_bits_;  // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
    AddressPoolManagerBitmap::brp_pool_bits_;  // GUARDED_BY(GetLock())
#if BUILDFLAG(USE_BACKUP_REF_PTR)
std::array<std::atomic_bool,
           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
    AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

}  // namespace partition_alloc::internal

#endif  // !defined(PA_HAS_64_BITS_POINTERS)
@ -0,0 +1,190 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

#include <array>
#include <atomic>
#include <bitset>
#include <limits>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"

#if !defined(PA_HAS_64_BITS_POINTERS)

namespace partition_alloc {

namespace internal {

// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
 public:
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;

  // For BRP pool, we use partition page granularity to eliminate the guard
  // pages from the bitmap at the ends:
  // - Eliminating the guard page at the beginning is needed so that pointers
  //   to the end of an allocation that immediately precede a super page in BRP
  //   pool don't accidentally fall into that pool.
  // - Eliminating the guard page at the end is to ensure that the last page
  //   of the address space isn't in the BRP pool. This allows using sentinels
  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
  //   on an invalid address. (Note, 64-bit systems don't have this problem as
  //   the upper half of the address space always belongs to the OS.)
  //
  // Note, direct map allocations also belong to this pool. The same logic as
  // above applies. It is important to note, however, that the granularity used
  // here has to be a minimum of partition page size and direct map allocation
  // granularity. Since DirectMapAllocationGranularity() is no smaller than
  // PageAllocationGranularity(), we don't need to decrease the bitmap
  // granularity any further.
  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
                "");
  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
  static constexpr size_t kBRPPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;

  // Regular pool may include both normal bucket and direct map allocations, so
  // the bitmap granularity has to be at least as small as
  // DirectMapAllocationGranularity(). No need to eliminate guard pages at the
  // ends, as this is a BackupRefPtr-specific concern, hence no need to lower
  // the granularity to partition page size.
  static constexpr size_t kBitShiftOfRegularPoolBitmap =
      DirectMapAllocationGranularityShift();
  static constexpr size_t kBytesPer1BitOfRegularPoolBitmap =
      DirectMapAllocationGranularity();
  static_assert(kBytesPer1BitOfRegularPoolBitmap ==
                    1 << kBitShiftOfRegularPoolBitmap,
                "");
  static constexpr size_t kRegularPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap;

  // Returns false for nullptr.
  static bool IsManagedByRegularPool(uintptr_t address) {
    static_assert(
        std::numeric_limits<uintptr_t>::max() >> kBitShiftOfRegularPoolBitmap <
            regular_pool_bits_.size(),
        "The bitmap is too small, will result in unchecked out of bounds "
        "accesses.");
    // It is safe to read |regular_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return PA_TS_UNCHECKED_READ(
        regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap];
  }

  // Returns false for nullptr.
  static bool IsManagedByBRPPool(uintptr_t address) {
    static_assert(std::numeric_limits<uintptr_t>::max() >>
                      kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
                  "The bitmap is too small, will result in unchecked out of "
                  "bounds accesses.");
    // It is safe to read |brp_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return PA_TS_UNCHECKED_READ(
        brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap];
  }

#if BUILDFLAG(USE_BACKUP_REF_PTR)
  static void BanSuperPageFromBRPPool(uintptr_t address) {
    brp_forbidden_super_page_map_[address >> kSuperPageShift].store(
        true, std::memory_order_relaxed);
  }

  static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
    // The only potentially dangerous scenario, in which this check is used, is
    // when the assignment of the first raw_ptr<T> object for a non-GigaCage
    // address is racing with the allocation of a new GigaCage super-page at
    // the same address. We assume that if raw_ptr<T> is being initialized with
    // a raw pointer, the associated allocation is "alive"; otherwise, the
    // issue should be fixed by rewriting the raw pointer variable as
    // raw_ptr<T>. In the worst case, when such a fix is impossible, we should
    // just undo the raw pointer -> raw_ptr<T> rewrite of the problematic
    // field. If the above assumption holds, the existing allocation will
    // prevent us from reserving the super-page region and, thus, having the
    // race condition. Since we rely on that external synchronization, the
    // relaxed memory ordering should be sufficient.
    return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
        std::memory_order_relaxed);
  }

  static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; }
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

 private:
  friend class AddressPoolManager;

  static Lock& GetLock();

  static std::bitset<kRegularPoolBits> regular_pool_bits_
      PA_GUARDED_BY(GetLock());
  static std::bitset<kBRPPoolBits> brp_pool_bits_ PA_GUARDED_BY(GetLock());
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
      brp_forbidden_super_page_map_;
  static std::atomic_size_t blocklist_hit_count_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
};

}  // namespace internal

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
  // No need to add IsManagedByConfigurablePool, because Configurable Pool
  // doesn't exist on 32-bit.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
  PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif
  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
         || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif
      ;
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
  return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
  return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
    uintptr_t address) {
  // The Configurable Pool is only available on 64-bit builds.
  return false;
}

PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
  // The Configurable Pool is only available on 64-bit builds.
  return false;
}

}  // namespace partition_alloc

#endif  // !defined(PA_HAS_64_BITS_POINTERS)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
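
On a 32-bit build, the free functions above are the public entry points for pointer classification. A sketch of the typical consultation pattern follows; the function name is hypothetical.

// Hypothetical 32-bit caller of the classification helpers above.
#include <cstdint>

#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"

bool ShouldRunBRPLogic(void* ptr) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  // Only BRP-pool memory carries BackupRefPtr ref-counts; regular-pool and
  // system allocations must be left alone. Returns false for nullptr.
  return partition_alloc::IsManagedByPartitionAllocBRPPool(address);
}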
@ -0,0 +1,22 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

namespace partition_alloc::internal {

using pool_handle = unsigned;

}  // namespace partition_alloc::internal

namespace base::internal {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::pool_handle;

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
@ -0,0 +1,66 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_space_randomization.h"

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>  // Must be in front of other Windows header files.

#include <versionhelpers.h>
#endif

namespace partition_alloc {

uintptr_t GetRandomPageBase() {
  uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());

#if defined(ARCH_CPU_64_BITS)
  random <<= 32ULL;
  random |= static_cast<uintptr_t>(internal::RandomValue());

  // The ASLRMask() and ASLROffset() constants will be suitable for the
  // OS and build configuration.
#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Windows >= 8.1 has the full 47 bits. Use them where available.
  static bool windows_81 = false;
  static bool windows_81_initialized = false;
  if (!windows_81_initialized) {
    windows_81 = IsWindows8Point1OrGreater();
    windows_81_initialized = true;
  }
  if (!windows_81) {
    random &= internal::ASLRMaskBefore8_10();
  } else {
    random &= internal::ASLRMask();
  }
  random += internal::ASLROffset();
#else
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else   // defined(ARCH_CPU_32_BITS)
#if BUILDFLAG(IS_WIN)
  // On win32 host systems the randomization plus huge alignment causes
  // excessive fragmentation. Plus most of these systems lack ASLR, so the
  // randomization isn't buying anything. In that case we just skip it.
  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
  static BOOL is_wow64 = -1;
  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
    is_wow64 = FALSE;
  if (!is_wow64)
    return 0;
#endif  // BUILDFLAG(IS_WIN)
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // defined(ARCH_CPU_32_BITS)

  PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
  return random;
}

}  // namespace partition_alloc
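
The two properties the function above guarantees can be spot-checked as follows. This is a sketch assuming a 64-bit build; the 32-bit Windows path may legitimately return 0, and plain asserts stand in for PA_DCHECK.

// Sanity-check sketch for GetRandomPageBase() (64-bit build assumed).
#include <cassert>
#include <cstdint>

#include "base/allocator/partition_allocator/address_space_randomization.h"

void CheckRandomPageBaseInvariants() {
  namespace pai = partition_alloc::internal;
  for (int i = 0; i < 1000; ++i) {
    const uintptr_t base = partition_alloc::GetRandomPageBase();
    // Aligned to the page-allocation granularity.
    assert(!(base & pai::PageAllocationGranularityOffsetMask()));
    // Confined to the per-platform window [offset, offset + mask].
    assert(base >= pai::ASLROffset());
    assert(base <= pai::ASLROffset() + pai::ASLRMask());
  }
}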
@ -0,0 +1,290 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_

#include <cstdint>

#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "build/build_config.h"

namespace partition_alloc {

// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();

namespace internal {

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) {
  return mask & PageAllocationGranularityBaseMask();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) {
  return AslrAddress((1ULL << bits) - 1ULL);
}

// Turn off formatting, because the thicket of nested ifdefs below is
// incomprehensible without indentation. It is also incomprehensible with
// indentation, but the only other option is a combinatorial explosion of
// *_{win,linux,mac,foo}_{32,64}.h files.
//
// clang-format off

#if defined(ARCH_CPU_64_BITS)

  #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

    // We shouldn't allocate system pages at all for sanitizer builds. However,
    // we do, and if random hint addresses interfere with address ranges
    // hard-coded in those tools, bad things happen. This address range is
    // copied from TSAN source but works with all tools. See
    // https://crbug.com/539863.
    PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
    ASLRMask() {
      return AslrAddress(0x007fffffffffULL);
    }
    PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
    ASLROffset() {
      return AslrAddress(0x7e8000000000ULL);
    }

  #elif BUILDFLAG(IS_WIN)

    // Windows 8.1 and newer support the full 48 bit address range. Older
    // versions of Windows only support 44 bits. Since ASLROffset() is non-zero
    // and may cause a carry, use 47 and 43 bit masks. See
    // http://www.alex-ionescu.com/?p=246
    constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
      return AslrMask(47);
    }
    constexpr PA_ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
      return AslrMask(43);
    }
    // Try not to map pages into the range where Windows loads DLLs by default.
    constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
      return 0x80000000ULL;
    }

  #elif BUILDFLAG(IS_APPLE)

    // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
    // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
    // is destroyed. Using a virtual address space that is too large causes a
    // leak of about 1 wired [can never be paged out] page per call to mmap. The
    // page is only reclaimed when the process is killed. Confine the hint to a
    // 39-bit section of the virtual address space.
    //
    // This implementation adapted from
    // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
    // is that here we clamp to 39 bits, not 32.
    //
    // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
    // changes.
    PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
    ASLRMask() {
      return AslrMask(38);
    }
    PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
    ASLROffset() {
      // Be careful, there is a zone where macOS will not map memory, at least
      // on ARM64. From an ARM64 machine running 12.3, the range seems to be
      // [0x1000000000, 0x7000000000). Make sure that the range we use is
      // outside these bounds. In 12.3, there is a reserved area between
      // MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
      // which is reserved on ARM64. See these constants in XNU's source code
      // for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
      return AslrAddress(0x10000000000ULL);
    }

  #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)

    #if defined(ARCH_CPU_X86_64)

      // Linux (and macOS) support the full 47-bit user space of x64 processors.
      // Use only 46 to allow the kernel a chance to fulfill the request.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(46);
      }
      constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
        return AslrAddress(0);
      }

    #elif defined(ARCH_CPU_ARM64)

      #if BUILDFLAG(IS_ANDROID)

      // Restrict the address range on Android to avoid a large performance
      // regression in single-process WebViews. See https://crbug.com/837640.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(30);
      }
      constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
        return AslrAddress(0x20000000ULL);
      }

      #elif BUILDFLAG(IS_LINUX)

      // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
      // page size and number of levels of translation pages used. We use
      // 39-bit as base as all setups should support this, lowered to 38-bit
      // as ASLROffset() could cause a carry.
      PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
      ASLRMask() {
        return AslrMask(38);
      }
      PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
      ASLROffset() {
        return AslrAddress(0x1000000000ULL);
      }

      #else

      // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
      // could cause a carry.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(38);
      }
      constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
        return AslrAddress(0x1000000000ULL);
      }

      #endif

    #elif defined(ARCH_CPU_PPC64)

      #if BUILDFLAG(IS_AIX)

        // AIX has 64 bits of virtual addressing, but we limit the address range
        // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
        // extra address space to isolate the mmap regions.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
          return AslrMask(30);
        }
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0x400000000000ULL);
        }

      #elif defined(ARCH_CPU_BIG_ENDIAN)

        // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
          return AslrMask(42);
        }
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0);
        }

      #else  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

        // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
          return AslrMask(46);
        }
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0);
        }

      #endif  // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

    #elif defined(ARCH_CPU_S390X)

      // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
      // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
      // chance to fulfill the request.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(40);
      }
      constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
        return AslrAddress(0);
      }

    #elif defined(ARCH_CPU_S390)

      // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
      // a chance to fulfill the request.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(29);
      }
      constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
        return AslrAddress(0);
      }

    #else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
           // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

      // For all other POSIX variants, use 30 bits.
      constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
        return AslrMask(30);
      }

      #if BUILDFLAG(IS_SOLARIS)

        // For our Solaris/illumos mmap hint, we pick a random address in the
        // bottom half of the top half of the address space (that is, the third
        // quarter). Because we do not MAP_FIXED, this will be treated only as a
        // hint -- the system will not fail to mmap because something else
        // happens to already be mapped at our random address. We deliberately
        // set the hint high enough to get well above the system's break (that
        // is, the heap); Solaris and illumos will try the hint and if that
        // fails allocate as if there were no hint at all. The high hint
        // prevents the break from getting hemmed in at low values, ceding half
        // of the address space to the system heap.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0x80000000ULL);
        }

      #elif BUILDFLAG(IS_AIX)

        // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
        // upper range.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0x90000000ULL);
        }

      #else  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)

        // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
        // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
        // 10.6 and 10.7.
        constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
          return AslrAddress(0x20000000ULL);
        }

      #endif  // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX)

    #endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
            // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

  #endif  // BUILDFLAG(IS_POSIX)

#elif defined(ARCH_CPU_32_BITS)

  // This is a good range on 32-bit Windows and Android (the only platforms on
  // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
  // is no issue with carries here.
  constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
    return AslrMask(30);
  }
  constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
    return AslrAddress(0x20000000ULL);
  }

#else

  #error Please tell us about your exotic hardware! Sounds interesting.

#endif  // defined(ARCH_CPU_32_BITS)

// clang-format on

}  // namespace internal

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
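
The mask/offset arithmetic is compact enough to verify by hand. Assuming the common 4 KiB page-allocation granularity (the real value is platform-dependent), a 30-bit mask plus the 0x20000000 offset used in the 32-bit case above stays carry-free:

// Spot-check of the AslrMask()/AslrAddress() arithmetic above, assuming a
// 4 KiB page-allocation granularity.
#include <cstdint>

constexpr uintptr_t kGranularityBaseMask = ~uintptr_t{0xFFF};

constexpr uintptr_t AslrAddress(uintptr_t mask) {
  return mask & kGranularityBaseMask;
}
constexpr uintptr_t AslrMask(uintptr_t bits) {
  return AslrAddress((1ULL << bits) - 1ULL);
}

// 30 random bits, truncated to page alignment.
static_assert(AslrMask(30) == 0x3FFFF000ULL, "");
// Offset + mask stays below 2^31, so adding the offset never carries.
static_assert(AslrMask(30) + AslrAddress(0x20000000ULL) < (1ULL << 31), "");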
52
src/base/allocator/partition_allocator/address_space_stats.h
Normal file
@ -0,0 +1,52 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_

#include <cstddef>

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"

namespace partition_alloc {

// All members are measured in super pages.
struct PoolStats {
  size_t usage = 0;

  // On 32-bit, GigaCage is mainly a logical entity, intermingled with
  // allocations not managed by PartitionAlloc. The "largest available
  // reservation" is not possible to measure in that case.
#if defined(PA_HAS_64_BITS_POINTERS)
  size_t largest_available_reservation = 0;
#endif  // defined(PA_HAS_64_BITS_POINTERS)
};

struct AddressSpaceStats {
  PoolStats regular_pool_stats;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  PoolStats brp_pool_stats;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
#if defined(PA_HAS_64_BITS_POINTERS)
  PoolStats configurable_pool_stats;
#else
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  size_t blocklist_size;  // measured in super pages
  size_t blocklist_hit_count;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
#endif  // defined(PA_HAS_64_BITS_POINTERS)
};

// Interface passed to `AddressPoolManager::DumpStats()` to mediate
// for `AddressSpaceDumpProvider`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper {
 public:
  virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0;
};

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_
41
src/base/allocator/partition_allocator/allocation_guard.cc
Normal file
@ -0,0 +1,41 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"

#if defined(PA_HAS_ALLOCATION_GUARD)

namespace partition_alloc {

namespace {
thread_local bool g_disallow_allocations;
}  // namespace

ScopedDisallowAllocations::ScopedDisallowAllocations() {
  if (g_disallow_allocations)
    PA_IMMEDIATE_CRASH();

  g_disallow_allocations = true;
}

ScopedDisallowAllocations::~ScopedDisallowAllocations() {
  g_disallow_allocations = false;
}

ScopedAllowAllocations::ScopedAllowAllocations() {
  // Save the previous value, as ScopedAllowAllocations is used in all
  // partitions, not just the malloc() one(s).
  saved_value_ = g_disallow_allocations;
  g_disallow_allocations = false;
}

ScopedAllowAllocations::~ScopedAllowAllocations() {
  g_disallow_allocations = saved_value_;
}

}  // namespace partition_alloc

#endif  // defined(PA_HAS_ALLOCATION_GUARD)
49
src/base/allocator/partition_allocator/allocation_guard.h
Normal file
@ -0,0 +1,49 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"

namespace partition_alloc {

#if defined(PA_HAS_ALLOCATION_GUARD)

// Disallow allocations in the scope. Does not nest.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations {
 public:
  ScopedDisallowAllocations();
  ~ScopedDisallowAllocations();
};

// Allow allocations in the scope. Does not nest.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
 public:
  ScopedAllowAllocations();
  ~ScopedAllowAllocations();

 private:
  bool saved_value_;
};

#else

struct [[maybe_unused]] ScopedDisallowAllocations{};
struct [[maybe_unused]] ScopedAllowAllocations{};

#endif  // defined(PA_HAS_ALLOCATION_GUARD)

}  // namespace partition_alloc

namespace base::internal {

using ::partition_alloc::ScopedAllowAllocations;
using ::partition_alloc::ScopedDisallowAllocations;

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_
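
A sketch of the re-entrancy pattern these guards implement; the function name is hypothetical, and the real users are PartitionAlloc's own allocation paths.

// Hypothetical allocator-internal path guarded against re-entrancy.
#include "base/allocator/partition_allocator/allocation_guard.h"

void AllocatorInternalPath() {
  // Sets the thread-local flag; if this path re-enters on the same thread,
  // the nested guard finds the flag already set and crashes immediately
  // (only when PA_HAS_ALLOCATION_GUARD is defined; otherwise a no-op).
  partition_alloc::ScopedDisallowAllocations guard;

  {
    // Permit a nested region that legitimately allocates, e.g. through a
    // different partition. The previous state is restored on scope exit.
    partition_alloc::ScopedAllowAllocations allow;
    // ... nested allocation is fine here ...
  }
}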
@ -0,0 +1,50 @@
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
#    Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.

.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end
arm_bti_test_function:
  # Mark the start of this function as a valid call target.
  bti jc
  add x0, x0, #1
arm_bti_test_function_invalid_offset:
  # This label simulates calling an incomplete function.
  # Jumping here should crash systems which support BTI.
  add x0, x0, #2
  ret
arm_bti_test_function_end:
  nop

// For details see section "6.2 Program Property" in
// "ELF for the Arm 64-bit Architecture (AArch64)"
// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property
.pushsection .note.gnu.property, "a";
  .balign 8;
  .long 4;
  .long 0x10;
  .long 0x5;
  .asciz "GNU";
  .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
  .long 4;
  .long 1;  /* GNU_PROPERTY_AARCH64_BTI */;
  .long 0;
.popsection

@ -0,0 +1,31 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_

#include "build/build_config.h"

#if defined(ARCH_CPU_ARM64)
extern "C" {
/**
 * A valid BTI function. Jumping to this function should not cause any problem
 * in a BTI enabled environment.
 **/
int64_t arm_bti_test_function(int64_t);

/**
 * A function without proper BTI landing pad. Jumping here should crash the
 * program on systems which support BTI.
 **/
int64_t arm_bti_test_function_invalid_offset(int64_t);

/**
 * A simple function which immediately returns to sender.
 **/
void arm_bti_test_function_end(void);
}
#endif  // defined(ARCH_CPU_ARM64)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
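
The five-step validation described in the .S file's comment translates roughly to the POSIX sketch below. Assumptions: Linux-specific PROT_BTI, no error handling, and a return value of 3 because both `add` instructions execute before `ret` when entering at the landing pad.

// Sketch of the BTI validation procedure (Linux/AArch64 assumptions).
#include <cstdint>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

#include "base/allocator/partition_allocator/arm_bti_test_functions.h"

#ifndef PROT_BTI
#define PROT_BTI 0x10  // Assumed Linux/AArch64 value if headers lack it.
#endif

bool RunBtiSmokeTest() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // 1) Allocate a read-write page.
  void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // 2) Copy the code between the start and end symbols into it.
  const auto* begin = reinterpret_cast<const char*>(&arm_bti_test_function);
  const auto* end = reinterpret_cast<const char*>(&arm_bti_test_function_end);
  std::memcpy(page, begin, static_cast<size_t>(end - begin));
  // (A real test would also flush the instruction cache here.)
  // 3) Flip the page to read-execute with PROT_BTI.
  mprotect(page, page_size, PROT_READ | PROT_EXEC | PROT_BTI);
  // 4) Call the first offset; the `bti jc` landing pad lets the branch land,
  //    and both `add`s run, so 0 becomes 3.
  auto* fn = reinterpret_cast<int64_t (*)(int64_t)>(page);
  return fn(0) == 3;
  // 5) Jumping to the second offset (past the landing pad) should crash on
  //    BTI hardware; that belongs in a death test rather than here.
}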
123
src/base/allocator/partition_allocator/build_config.md
Normal file
@ -0,0 +1,123 @@
# Build Config

PartitionAlloc's behavior and operation can be influenced by many
different settings. Broadly, these are controlled at the top-level by
[GN args][gn-declare-args], which propagate via
[buildflags][buildflag-header] and `#defined` clauses.

*** promo
Most of what you'll want to know exists between

*   [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
*   [`allocator.gni`][allocator-gni],
*   [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
*   [`//base/BUILD.gn`][base-build-gn].
***

*** aside
While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of
PartitionAlloc's behavior is governed by compound conditions `#defined`
in [`partition_alloc_config.h`][partition-alloc-config].
***

## Select GN Args

### `use_partition_alloc`

Defines whether PartitionAlloc is at all available.

Setting this `false` will entirely remove PartitionAlloc from the
Chromium build. _You probably do not want this._

*** note
Back when PartitionAlloc was the dedicated allocator in Blink, disabling
it was logically identical to wholly disabling it in Chromium. This GN
arg organically grew in scope with the advent of
PartitionAlloc-Everywhere and must be `true` as a prerequisite for
enabling PA-E.
***

### `use_allocator`

Does nothing special when value is `"none"`. Enables
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is
`"partition"`.

*** note
*   While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
    includes Blink where PartitionAlloc originated, setting
    `use_allocator = "none"` does not disable PA usage in Blink.
*   `use_allocator = "partition"` internally sets
    `use_partition_alloc_as_malloc = true`, which must not be confused
    with `use_partition_alloc` (see above).
***

### `use_backup_ref_ptr`

Specifies `BackupRefPtr` as the implementation for `base::raw_ptr<T>`
when `true`. See the [MiraclePtr documentation][miracleptr-doc].
A combined example appears after the aside below.

*** aside
BRP requires support from PartitionAlloc, so `use_backup_ref_ptr` also
compiles the relevant code into PA. However, this arg does _not_ govern
whether or not BRP is actually enabled at runtime - that functionality
is controlled by a Finch flag.
***
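
Putting the three args together: a hypothetical `args.gn` that builds with
PA-E and compiles BRP support in might look like the following (illustrative
only; consult [`allocator.gni`][allocator-gni] for the authoritative defaults
and constraints):

```none
use_partition_alloc = true
use_allocator = "partition"
use_backup_ref_ptr = true
```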

## Note: Component Builds

When working on PartitionAlloc, know that `is_debug` defaults to
implying `is_component_build`, which interferes with the allocator
shim. A typical set of GN args should include

```none
is_debug = true
is_component_build = false
```

Conversely, build configurations that have `is_component_build = true`
without explicitly specifying PA-specific args will not build with PA-E
enabled.

## Notable Macros

There is an ongoing effort
[to break out PartitionAlloc into a standalone library][pa-ee-crbug].
Once PartitionAlloc stands alone from the larger Chrome build apparatus,
the code loses access to some macros. This is not an immediate concern,
but the team needs to decide either

*   how to propagate these macros in place, or
*   how to remove them, replacing them with PA-specific build config.

A non-exhaustive list of work items:

*   `OFFICIAL_BUILD` - influences crash macros and
    `PA_THREAD_CACHE_ALLOC_STATS`. These are conceptually distinct enough
    to be worth separating into dedicated build controls.
*   `IS_PARTITION_ALLOC_IMPL` - must be defined when PartitionAlloc is
    built as a shared library. This is required to export symbols.
*   `COMPONENT_BUILD` - component builds (as per
    `//docs/component_build.md`) must `#define COMPONENT_BUILD`.
    Additionally, to build Win32, invoker must `#define WIN32`.
*   `MEMORY_TOOL_REPLACES_ALLOCATOR`
*   `*_SANITIZER` - mainly influences unit tests.

TODO(crbug.com/1151236): don't `PA_COMPONENT_EXPORT()` functions defined
under `partition_alloc_base/`.

*** note
Over time, the above list should evolve into a list of macros / GN args
that influence PartitionAlloc's behavior.
***

[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h
[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview
[pa-ee-crbug]: https://crbug.com/1151236
@ -0,0 +1,47 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"

namespace partition_alloc {

namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
}  // namespace

DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
  PA_DCHECK(g_dangling_raw_ptr_detected_fn);
  return g_dangling_raw_ptr_detected_fn;
}

DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn() {
  PA_DCHECK(g_dangling_raw_ptr_released_fn);
  return g_dangling_raw_ptr_released_fn;
}

void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn fn) {
  PA_DCHECK(fn);
  g_dangling_raw_ptr_detected_fn = fn;
}

void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
  PA_DCHECK(fn);
  g_dangling_raw_ptr_released_fn = fn;
}

namespace internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
  g_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
  g_dangling_raw_ptr_released_fn(id);
}

}  // namespace internal
}  // namespace partition_alloc
@ -0,0 +1,56 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_

#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"

// When compiled with the build flag `enable_dangling_raw_ptr_checks`, dangling
// raw_ptr instances are reported. Their behavior can be configured here.
//
// Purpose of this level of indirection:
// - Ease testing.
// - Keep partition_alloc/ independent from base/. In most cases, when a
//   dangling raw_ptr is detected/released, this involves recording a
//   base::debug::StackTrace, which isn't desirable inside partition_alloc/.
// - Be able (potentially) to turn this feature on/off at runtime based on
//   dependent's flags.
namespace partition_alloc {

// DanglingRawPtrDetected is called when there exists a `raw_ptr` referencing a
// memory region and the allocator is asked to release it.
//
// It won't be called again with the same `id`, up until (potentially) a call to
// DanglingRawPtrReleased(`id`) is made.
//
// This function is called from within the allocator, and is not allowed to
// allocate memory.
using DanglingRawPtrDetectedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);

// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region.
//
// This function is allowed to allocate memory.
using DanglingRawPtrReleasedFn = void(uintptr_t /*id*/);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrReleasedFn* GetDanglingRawPtrReleasedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn);

namespace internal {

PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);

}  // namespace internal
}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_DANGLING_RAW_PTR_CHECKS_H_
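The two hook pairs above form a small protocol: the embedder installs both
callbacks once, and the allocator later invokes them with an opaque `id`. A
minimal sketch of installing them follows; it is not part of the imported
sources, and the `fprintf` logging is purely illustrative (a real
detected-hook must not allocate, so it would write to preallocated storage
instead).

```cpp
#include <cstdint>
#include <cstdio>

#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"

namespace {
// Runs inside the allocator when a freed region is still referenced by a
// raw_ptr: must not allocate.
void OnDanglingRawPtrDetected(uintptr_t id) {
  std::fprintf(stderr, "dangling raw_ptr detected, id=%llx\n",
               static_cast<unsigned long long>(id));
}
// Runs once the last dangling raw_ptr lets go of the region: may allocate.
void OnDanglingRawPtrReleased(uintptr_t id) {
  std::fprintf(stderr, "dangling raw_ptr released, id=%llx\n",
               static_cast<unsigned long long>(id));
}
}  // namespace

void InstallDanglingRawPtrHooks() {
  partition_alloc::SetDanglingRawPtrDetectedFn(OnDanglingRawPtrDetected);
  partition_alloc::SetDanglingRawPtrReleasedFn(OnDanglingRawPtrReleased);
}
```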
91
src/base/allocator/partition_allocator/dot/super-page.dot
Normal file
@ -0,0 +1,91 @@
digraph G {
  graph[bgcolor=transparent]
  node[shape=plaintext]
  edge[style=dashed]

  invisible_a[label=<
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
      <TR>
        <TD PORT="red" WIDTH="100"></TD>
        <TD PORT="green" WIDTH="20"></TD>
        <TD PORT="blue" WIDTH="40"></TD>
        <TD PORT="gold" WIDTH="300"></TD>
        <TD PORT="pink" WIDTH="60"></TD>
      </TR>
    </TABLE>
  >]
  superpage[xlabel="Super Page",label=<
    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" WIDTH="10">
      <TR>
        <!-- Head Partition Page -->
        <TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
        <TD PORT="metadata"></TD>
        <TD BGCOLOR="darkgrey" WIDTH="18"></TD>
        <!-- Several Slot Spans -->
        <TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
        <TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
        <TD PORT="blue" BGCOLOR="cornflowerblue" WIDTH="79">2</TD>
        <TD PORT="gold" BGCOLOR="gold" WIDTH="239">6</TD>
        <TD PORT="red2" BGCOLOR="crimson" WIDTH="119">3</TD>
        <TD PORT="pink" BGCOLOR="deeppink" WIDTH="39">1</TD>
        <TD WIDTH="79">...</TD>
        <!-- Tail Partition Page -->
        <TD BGCOLOR="darkgrey" WIDTH="39"></TD>
      </TR>
    </TABLE>
  >]
  invisible_b[label=<
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
      <TR>
        <TD PORT="green" WIDTH="30"></TD>
        <TD PORT="blue" WIDTH="60"></TD>
        <TD PORT="gold" WIDTH="180"></TD>
        <TD PORT="red" WIDTH="90"></TD>
        <TD PORT="pink" WIDTH="90"></TD>
      </TR>
    </TABLE>
  >]
  metadata_page[xlabel="Metadata",label=<
    <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
      <TR>
        <!-- Guard Page Metadata -->
        <TD BGCOLOR="darkgrey"> </TD>
        <!-- Red Slot Span Metadata -->
        <TD BGCOLOR="crimson">v</TD>
        <TD BGCOLOR="crimson">+</TD>
        <TD BGCOLOR="crimson">+</TD>
        <!-- Green Slot Span Metadata -->
        <TD BGCOLOR="palegreen">v</TD>
        <!-- Blue Slot Span Metadata -->
        <TD BGCOLOR="cornflowerblue">v</TD>
        <TD BGCOLOR="cornflowerblue">+</TD>
        <!-- Gold Slot Span Metadata -->
        <TD BGCOLOR="gold">v</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <TD BGCOLOR="gold">+</TD>
        <!-- Red Slot Span Metadata -->
        <TD BGCOLOR="crimson">v</TD>
        <TD BGCOLOR="crimson">+</TD>
        <TD BGCOLOR="crimson">+</TD>
        <!-- Pink Slot Span Metadata -->
        <TD BGCOLOR="deeppink">v</TD>
        <!-- etc. -->
        <TD WIDTH="64">...</TD>
        <!-- Guard Page Metadata -->
        <TD BGCOLOR="darkgrey"> </TD>
      </TR>
    </TABLE>
  >]

  invisible_a:red->superpage:red->superpage:red2[color=crimson]
  superpage:red2->invisible_b:red[color=crimson]
  invisible_a:green->superpage:green->invisible_b:green[color=palegreen]
  invisible_a:blue->superpage:blue->invisible_b:blue[color=cornflowerblue]
  invisible_a:gold->superpage:gold->invisible_b:gold[color=gold]
  invisible_a:pink->superpage:pink->invisible_b:pink[color=deeppink]

  superpage:metadata->metadata_page[style="",arrowhead=odot]
}
BIN
src/base/allocator/partition_allocator/dot/super-page.png
Normal file
Binary file not shown. (Size: 18 KiB)
87
src/base/allocator/partition_allocator/extended_api.cc
Normal file
@ -0,0 +1,87 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/extended_api.h"

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/thread_cache.h"

namespace partition_alloc::internal {

#if defined(PA_THREAD_CACHE_SUPPORTED)

namespace {

void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
  // Some platforms don't have a thread cache, or it could already have been
  // disabled.
  if (!root || !root->flags.with_thread_cache)
    return;

  ThreadCacheRegistry::Instance().PurgeAll();
  root->flags.with_thread_cache = false;
  // Doesn't destroy the thread cache object(s). For background threads, they
  // will be collected (and free cached memory) at thread destruction
  // time. For the main thread, we leak it.
}

void EnablePartitionAllocThreadCacheForRootIfDisabled(
    ThreadSafePartitionRoot* root) {
  if (!root)
    return;
  root->flags.with_thread_cache = true;
}

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void DisablePartitionAllocThreadCacheForProcess() {
  auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
  auto* aligned_allocator =
      ::base::internal::PartitionAllocMalloc::AlignedAllocator();
  DisableThreadCacheForRootIfEnabled(regular_allocator);
  if (aligned_allocator != regular_allocator)
    DisableThreadCacheForRootIfEnabled(aligned_allocator);
  DisableThreadCacheForRootIfEnabled(
      ::base::internal::PartitionAllocMalloc::OriginalAllocator());
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

}  // namespace

#endif  // defined(PA_THREAD_CACHE_SUPPORTED)

void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  DisablePartitionAllocThreadCacheForProcess();
#else
  PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ThreadCache::SwapForTesting(root);
  EnablePartitionAllocThreadCacheForRootIfDisabled(root);

#endif  // defined(PA_THREAD_CACHE_SUPPORTED)
}

void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if defined(PA_THREAD_CACHE_SUPPORTED)

  // First, disable the test thread cache we have.
  DisableThreadCacheForRootIfEnabled(root);

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
  EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);

  ThreadCache::SwapForTesting(regular_allocator);
#else
  ThreadCache::SwapForTesting(nullptr);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#endif  // defined(PA_THREAD_CACHE_SUPPORTED)
}

}  // namespace partition_alloc::internal
24
src/base/allocator/partition_allocator/extended_api.h
Normal file
@ -0,0 +1,24 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/thread_cache.h"

namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
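Taken together, the pair is meant to bracket a test: swap the process thread
cache out onto a test partition, exercise it, then swap the default back in. A
minimal sketch under that reading follows; it is not from the imported sources,
and the single-threaded fixture it assumes is hypothetical.

```cpp
#include "base/allocator/partition_allocator/extended_api.h"

// Must run while no other threads are allocating (see the header comment).
void ExerciseThreadCacheOnTestRoot(
    partition_alloc::ThreadSafePartitionRoot* test_root) {
  using namespace partition_alloc::internal;
  // Route this process's thread cache to |test_root|.
  SwapOutProcessThreadCacheForTesting(test_root);
  // ... allocate from |test_root| and inspect thread-cache state here ...
  // Restore the default process-wide thread cache.
  SwapInProcessThreadCacheForTesting(test_root);
}
```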
132
src/base/allocator/partition_allocator/glossary.md
Normal file
@ -0,0 +1,132 @@
# Glossary

This page describes some core terminology used in PartitionAlloc.
A weak attempt is made to present terms "in conceptual order" such that
each term depends mainly upon previously defined ones.

## Top-Level Terms

* **Partition**: A heap that is separated and protected both from other
  partitions and from non-PartitionAlloc memory. Each partition holds
  multiple buckets.
* **Bucket**: A collection of regions in a partition that contains
  similar-sized objects. For example, one bucket may hold objects of
  size (224, 256], another (256, 320], etc. Bucket size
  brackets are geometrically spaced,
  [going up to `kMaxBucketed`][max-bucket-comment].
* **Normal Bucket**: Any bucket whose size ceiling does not exceed
  `kMaxBucketed`. This is the common case in PartitionAlloc, and
  the "normal" modifier is often dropped in casual reference.
* **Direct Map (Bucket)**: Any allocation whose size exceeds `kMaxBucketed`.

Buckets consist of slot spans, organized as linked lists (see below).

## Pages

* **System Page**: A memory page defined by the CPU/OS. Commonly
  referred to as a "virtual page" in other contexts. This is typically
  4KiB, but it can be larger. PartitionAlloc supports up to 64KiB,
  though this constant isn't always known at compile time (depending
  on the OS).
* **Partition Page**: The most common granularity used by
  PartitionAlloc. Consists of exactly 4 system pages.
* **Super Page**: A 2MiB region, aligned on a 2MiB boundary. Not to
  be confused with OS-level terms like "large page" or "huge page",
  which are also commonly 2MiB. These have to be fully committed /
  uncommitted in memory, whereas super pages can be partially committed
  with system page granularity.

## Slots and Spans

* **Slot**: An indivisible allocation unit. Slot sizes are tied to
  buckets. For example, each allocation that falls into the bucket
  (224, 256] would be satisfied with a slot of size 256. This
  applies only to normal buckets, not to direct map.
* **Slot Span**: A run of same-sized slots that are contiguous in
  memory. Slot span size is a multiple of partition page size, but it
  isn't always a multiple of slot size, although we try hard for this
  to be the case.
* **Small Bucket**: Allocations up to 4 partition pages. In these
  cases, slot spans are always between 1 and 4 partition pages in
  size. For each slot span size, the slot span is chosen to minimize
  the number of pages used while keeping the rounding waste under a
  reasonable limit (see the sketch after this section).
  * For example, for a slot size of 96, 64B waste is deemed acceptable
    when using a single partition page, but for slot size
    384, the potential waste of 256B wouldn't be, so 3 partition pages
    are used to achieve 0B waste (3 × 16KiB is an exact multiple of 384B).
  * PartitionAlloc may avoid waste by lowering the number of committed
    system pages compared to the number of reserved pages. For
    example, for the slot size of 896B we'd use a slot span of 2
    partition pages of 16KiB each, i.e. 8 system pages of 4KiB, but commit
    only up to 7, thus resulting in perfect packing (7 × 4KiB = 28KiB
    holds exactly 32 slots of 896B).
* **Single-Slot Span**: Allocations above 4 partition pages (but
  ≤`kMaxBucketed`). So named because each such slot span is guaranteed
  to hold exactly one slot.
  * Fun fact: there are sizes ≤4 partition pages that result in a
    slot span having exactly 1 slot, but nonetheless they're still
    classified as small buckets. The reason is that single-slot spans
    are often handled by a different code path, and that distinction
    is made purely based on slot size, for simplicity and efficiency.
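A toy sketch of the small-bucket sizing rule just described: choose the fewest
partition pages (1 through 4) whose rounding waste stays under a limit. This is
not PartitionAlloc's actual algorithm; the 64B threshold is picked purely to
reproduce the two examples above.

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t kPartitionPageSize = 16 * 1024;  // 4 system pages of 4KiB.
constexpr size_t kMaxWaste = 64;                  // Illustrative threshold only.

// Returns the smallest span (in partition pages) whose waste is acceptable.
size_t PartitionPagesForSlotSize(size_t slot_size) {
  for (size_t pages = 1; pages <= 4; ++pages) {
    if ((pages * kPartitionPageSize) % slot_size <= kMaxWaste)
      return pages;
  }
  return 4;  // Fall back to the largest small-bucket span.
}

int main() {
  // 16KiB % 96 = 64B of waste: acceptable, one page suffices.
  std::printf("96B  -> %zu partition page(s)\n", PartitionPagesForSlotSize(96));
  // 16KiB % 384 = 256B and 32KiB % 384 = 128B are too wasteful; 48KiB % 384 = 0.
  std::printf("384B -> %zu partition page(s)\n", PartitionPagesForSlotSize(384));
}
```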
## Other Terms

* **Object**: A chunk of memory returned to the allocating invoker
  of the size requested. It doesn't have to span the entire slot,
  nor does it have to begin at the slot start. This term is commonly
  used as a parameter name in PartitionAlloc code, as opposed to
  `slot_start`.
* **Thread Cache**: A [thread-local structure][pa-thread-cache] that
  holds some not-too-large memory chunks, ready to be allocated. This
  speeds up in-thread allocation by reducing a lock hold to a
  thread-local storage lookup, improving cache locality.
* **GigaCage**: A memory region several gigabytes wide, reserved by
  PartitionAlloc upon initialization, from which all allocations are
  taken. The motivation for GigaCage is for code to be able to examine
  a pointer and to immediately determine whether or not the memory was
  allocated by PartitionAlloc. This provides support for a number of
  features, including
  [StarScan][starscan-readme] and
  [BackupRefPtr][brp-doc].
  * Note that GigaCage only exists in builds with 64-bit pointers.
  * In builds with 32-bit pointers, PartitionAlloc tracks the pointers
    it dispenses with a bitmap. This is often referred to as "fake
    GigaCage" (or simply "GigaCage") for lack of a better term.
* **Payload**: The usable area of a super page in which slot spans
  reside. While generally this means "everything between the first
  and last guard partition pages in a super page," the presence of
  other metadata (e.g. StarScan bitmaps) can bump the starting offset
  forward. While this term is entrenched in the code, the team
  considers it suboptimal and is actively looking for a replacement.

## PartitionAlloc-Everywhere

Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
It was invoked explicitly, by calling PartitionAlloc APIs directly.

PartitionAlloc-Everywhere is the name of the project that brought PartitionAlloc
to the entire-ish codebase (exclusions apply). This was done by intercepting
`malloc()`, `free()`, `realloc()`, `posix_memalign()`, etc. and
routing them into PartitionAlloc. The shim located in
`base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h` is
responsible for intercepting. For more details, see
[base/allocator/README.md](../../../base/allocator/README.md).

A special, catch-it-all *Malloc* partition has been created for the intercepted
`malloc()` et al. This is to isolate it from already existing Blink partitions.
The only exception to that is Blink's *FastMalloc* partition, which was also
catch-it-all in nature, so it's perfectly fine to merge these together, to
minimize fragmentation.

As of 2022, PartitionAlloc-Everywhere is supported on:

* Windows 32- and 64-bit
* Linux
* Android 32- and 64-bit
* macOS
* Fuchsia

[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
[starscan-readme]: https://chromium.googlesource.com/chromium/src/+/main/base/allocator/partition_allocator/starscan/README.md
[brp-doc]: https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/preview
96
src/base/allocator/partition_allocator/memory_reclaimer.cc
Normal file
@ -0,0 +1,96 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/memory_reclaimer.h"

#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"

// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
// cause significant jank.
#define PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM 0

namespace partition_alloc {

// static
MemoryReclaimer* MemoryReclaimer::Instance() {
  static internal::base::NoDestructor<MemoryReclaimer> instance;
  return instance.get();
}

void MemoryReclaimer::RegisterPartition(PartitionRoot<>* partition) {
  internal::ScopedGuard lock(lock_);
  PA_DCHECK(partition);
  auto it_and_whether_inserted = partitions_.insert(partition);
  PA_DCHECK(it_and_whether_inserted.second);
}

void MemoryReclaimer::UnregisterPartition(PartitionRoot<>* partition) {
  internal::ScopedGuard lock(lock_);
  PA_DCHECK(partition);
  size_t erased_count = partitions_.erase(partition);
  PA_DCHECK(erased_count == 1u);
}

MemoryReclaimer::MemoryReclaimer() = default;
MemoryReclaimer::~MemoryReclaimer() = default;

void MemoryReclaimer::ReclaimAll() {
  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
                         PurgeFlags::kDiscardUnusedSystemPages |
                         PurgeFlags::kAggressiveReclaim;
  Reclaim(kFlags);
}

void MemoryReclaimer::ReclaimNormal() {
  constexpr int kFlags = PurgeFlags::kDecommitEmptySlotSpans |
                         PurgeFlags::kDiscardUnusedSystemPages;
  Reclaim(kFlags);
}

void MemoryReclaimer::Reclaim(int flags) {
  internal::ScopedGuard lock(
      lock_);  // Has to protect from concurrent (Un)Register calls.

  // PCScan quarantines freed slots. Trigger the scan first to let it call
  // FreeNoHooksImmediate on slots that pass the quarantine.
  //
  // In turn, FreeNoHooksImmediate may add slots to the thread cache. Purge it
  // next so that the slots are actually freed. (This is done synchronously
  // only for the current thread.)
  //
  // Lastly, decommit empty slot spans and try to discard unused pages at
  // the end of the remaining active slots.
#if PA_STARSCAN_ENABLE_STARSCAN_ON_RECLAIM
  {
    using PCScan = internal::PCScan;
    const auto invocation_mode = flags & PurgeFlags::kAggressiveReclaim
                                     ? PCScan::InvocationMode::kForcedBlocking
                                     : PCScan::InvocationMode::kBlocking;
    PCScan::PerformScanIfNeeded(invocation_mode);
  }
#endif

#if defined(PA_THREAD_CACHE_SUPPORTED)
  // Don't completely empty the thread cache outside of low memory situations,
  // as there is a periodic purge which makes sure that it doesn't take too
  // much space.
  if (flags & PurgeFlags::kAggressiveReclaim)
    ThreadCacheRegistry::Instance().PurgeAll();
#endif

  for (auto* partition : partitions_)
    partition->PurgeMemory(flags);
}

void MemoryReclaimer::ResetForTesting() {
  internal::ScopedGuard lock(lock_);
  partitions_.clear();
}

}  // namespace partition_alloc
82
src/base/allocator/partition_allocator/memory_reclaimer.h
Normal file
@ -0,0 +1,82 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

#include <memory>
#include <set>

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_lock.h"

namespace partition_alloc {

// Handles memory reclaim for PartitionAlloc.
//
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// called from any thread, concurrently with reclaim. Reclaim itself runs in
// whichever context the caller invokes it from, so the caller must make sure
// that context is compatible with the various partitions.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
 public:
  static MemoryReclaimer* Instance();

  MemoryReclaimer(const MemoryReclaimer&) = delete;
  MemoryReclaimer& operator=(const MemoryReclaimer&) = delete;

  // Internal. Do not use.
  // Registers a partition to be tracked by the reclaimer.
  void RegisterPartition(PartitionRoot<>* partition);
  // Internal. Do not use.
  // Unregisters a partition so it is no longer tracked by the reclaimer.
  void UnregisterPartition(PartitionRoot<>* partition);

  // Triggers an explicit reclaim now to reclaim as much free memory as
  // possible. API callers need to invoke this method periodically
  // if they want to use the memory reclaimer.
  // See also GetRecommendedReclaimIntervalInMicroseconds()'s comment.
  void ReclaimNormal();

  // Returns a recommended interval at which to invoke ReclaimNormal().
  int64_t GetRecommendedReclaimIntervalInMicroseconds() {
    return internal::base::Seconds(4).InMicroseconds();
  }

  // Triggers an explicit reclaim now, reclaiming all free memory.
  void ReclaimAll();

 private:
  MemoryReclaimer();
  ~MemoryReclaimer();
  // |flags| is an OR of PurgeFlags.
  void Reclaim(int flags);
  void ReclaimAndReschedule();
  void ResetForTesting();

  internal::Lock lock_;
  std::set<PartitionRoot<>*> partitions_ PA_GUARDED_BY(lock_);

  friend class internal::base::NoDestructor<MemoryReclaimer>;
  friend class MemoryReclaimerTest;
};

}  // namespace partition_alloc

namespace base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::MemoryReclaimer;

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
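The class no longer owns a timer: the embedder is expected to call
`ReclaimNormal()` on the recommended cadence. A minimal sketch of that contract
follows; it is not from the imported sources, and the detached std::thread loop
is purely illustrative (Chromium would use its own task scheduling instead).

```cpp
#include <chrono>
#include <thread>

#include "base/allocator/partition_allocator/memory_reclaimer.h"

void StartPeriodicMemoryReclaim() {
  std::thread([] {
    auto* reclaimer = partition_alloc::MemoryReclaimer::Instance();
    const auto interval = std::chrono::microseconds(
        reclaimer->GetRecommendedReclaimIntervalInMicroseconds());
    for (;;) {
      std::this_thread::sleep_for(interval);
      reclaimer->ReclaimNormal();  // Periodic, non-aggressive reclaim.
    }
  }).detach();
}
```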
81
src/base/allocator/partition_allocator/oom.cc
Normal file
@ -0,0 +1,81 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/oom.h"

#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include <stdlib.h>

#include <array>
#endif  // BUILDFLAG(IS_WIN)

namespace partition_alloc {

size_t g_oom_size = 0U;

namespace internal {

// Crash server classifies base::internal::OnNoMemoryInternal as OOM.
// TODO(crbug.com/1151236): Update to
// partition_alloc::internal::base::internal::OnNoMemoryInternal
PA_NOINLINE void OnNoMemoryInternal(size_t size) {
  g_oom_size = size;
#if BUILDFLAG(IS_WIN)
  // Kill the process. This is important for security since most code
  // does not check the result of memory allocation.
  // https://msdn.microsoft.com/en-us/library/het71c37.aspx
  // Pass the size of the failed request in an exception argument.
  ULONG_PTR exception_args[] = {size};
  ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
                   std::size(exception_args), exception_args);

  // Safety check, make sure the process exits here.
  _exit(win::kOomExceptionCode);
#else
  size_t tmp_size = size;
  internal::base::debug::Alias(&tmp_size);

  // Note: Don't add anything that may allocate here. Depending on the
  // allocator, this may be called from within the allocator (e.g. with
  // PartitionAlloc), and would deadlock as our locks are not recursive.
  //
  // Additionally, this is unlikely to work, since allocating from an OOM
  // handler is likely to fail.
  //
  // Use PA_IMMEDIATE_CRASH() so that the top frame in the crash is our code,
  // rather than using abort() or similar; this avoids the crash server needing
  // to be able to successfully unwind through libc to get to the correct
  // address, which is particularly an issue on Android.
  PA_IMMEDIATE_CRASH();
#endif  // BUILDFLAG(IS_WIN)
}

}  // namespace internal

void TerminateBecauseOutOfMemory(size_t size) {
  internal::OnNoMemoryInternal(size);
}

namespace internal {

// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
  RunPartitionAllocOomCallback();
  TerminateBecauseOutOfMemory(size);
  PA_IMMEDIATE_CRASH();
}

}  // namespace internal

}  // namespace partition_alloc
70
src/base/allocator/partition_allocator/oom.h
Normal file
@ -0,0 +1,70 @@
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_

#include <cstddef>

#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
#endif

namespace partition_alloc {

// Terminates the process. Should be called only for out of memory errors.
// |size| is the size of the failed allocation, or 0 if not known.
// Crash reporting classifies such crashes as OOM.
// Must be allocation-safe.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void TerminateBecauseOutOfMemory(size_t size);

// Records the size of the allocation that caused the current OOM crash, for
// consumption by Breakpad.
// TODO: this can be removed when Breakpad is no longer supported.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern size_t g_oom_size;

#if BUILDFLAG(IS_WIN)
namespace win {

// Custom Windows exception code chosen to indicate an out of memory error.
// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
// "To make sure that you do not define a code that conflicts with an existing
// exception code" ... "The resulting error code should therefore have the
// highest four bits set to hexadecimal E."
// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
const DWORD kOomExceptionCode = 0xe0000008;

}  // namespace win
#endif

namespace internal {

// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
OnNoMemory(size_t size);

// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
// OOM_CRASH(size) is called by users of PageAllocator (including
// PartitionAlloc) to signify an allocation failure from the platform.
#define OOM_CRASH(size)                                     \
  do {                                                      \
    /* Raising an exception might allocate, allow that. */ \
    ::partition_alloc::ScopedAllowAllocations guard{};      \
    ::partition_alloc::internal::OnNoMemory(size);          \
  } while (0)

}  // namespace internal

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
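A short sketch of the reporting path the macro above defines: a PageAllocator
user checks the platform allocation and, on failure, reports it via
OOM_CRASH(size), which never returns. This is not from the imported sources;
std::malloc stands in for a real platform mapping call.

```cpp
#include <cstdlib>

#include "base/allocator/partition_allocator/oom.h"

void* AllocOrDie(std::size_t size) {
  void* ptr = std::malloc(size);  // Stand-in for a platform mapping primitive.
  if (!ptr)
    OOM_CRASH(size);  // Runs the OOM callback, then crashes; never returns.
  return ptr;
}
```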
27
src/base/allocator/partition_allocator/oom_callback.cc
Normal file
@ -0,0 +1,27 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/oom_callback.h"

#include "base/allocator/partition_allocator/partition_alloc_check.h"

namespace partition_alloc {

namespace {
PartitionAllocOomCallback g_oom_callback;
}  // namespace

void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
  PA_DCHECK(!g_oom_callback);
  g_oom_callback = callback;
}

namespace internal {
void RunPartitionAllocOomCallback() {
  if (g_oom_callback)
    g_oom_callback();
}
}  // namespace internal

}  // namespace partition_alloc
26
src/base/allocator/partition_allocator/oom_callback.h
Normal file
@ -0,0 +1,26 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_

#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"

namespace partition_alloc {

using PartitionAllocOomCallback = void (*)();

// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
// allocation failure from the platform.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback);

namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RunPartitionAllocOomCallback();
}  // namespace internal

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
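Registering the callback is a one-shot operation (a second call would trip the
PA_DCHECK in oom_callback.cc). A minimal sketch, not from the imported sources:

```cpp
#include "base/allocator/partition_allocator/oom_callback.h"

namespace {
// Runs inside OOM_CRASH(), just before the process terminates. Keep it
// allocation-free: flush crash keys, write a breadcrumb, and so on.
void OnPartitionAllocOom() {}
}  // namespace

void InstallPartitionAllocOomCallback() {
  partition_alloc::SetPartitionAllocOomCallback(&OnPartitionAllocOom);
}
```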
382
src/base/allocator/partition_allocator/page_allocator.cc
Normal file
382
src/base/allocator/partition_allocator/page_allocator.cc
Normal file
@ -0,0 +1,382 @@
|
|||||||
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
#include "base/allocator/partition_allocator/address_space_randomization.h"
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internal.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||||
|
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||||
|
#include "build/build_config.h"
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include <windows.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if BUILDFLAG(IS_WIN)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
|
||||||
|
#elif BUILDFLAG(IS_POSIX)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
|
||||||
|
#elif BUILDFLAG(IS_FUCHSIA)
|
||||||
|
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
|
||||||
|
#else
|
||||||
|
#error Platform not supported.
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace partition_alloc {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
internal::Lock g_reserve_lock;
|
||||||
|
|
||||||
|
// We may reserve/release address space on different threads.
|
||||||
|
internal::Lock& GetReserveLock() {
|
||||||
|
return g_reserve_lock;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::atomic<size_t> g_total_mapped_address_space;
|
||||||
|
|
||||||
|
// We only support a single block of reserved address space.
|
||||||
|
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
|
||||||
|
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
|
||||||
|
|
||||||
|
uintptr_t AllocPagesIncludingReserved(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
uintptr_t ret =
|
||||||
|
internal::SystemAllocPages(address, length, accessibility, page_tag);
|
||||||
|
if (!ret) {
|
||||||
|
const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
|
||||||
|
if (cant_alloc_length) {
|
||||||
|
// The system cannot allocate |length| bytes. Release any reserved address
|
||||||
|
// space and try once more.
|
||||||
|
ReleaseReservation();
|
||||||
|
ret =
|
||||||
|
internal::SystemAllocPages(address, length, accessibility, page_tag);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trims memory at |base_address| to given |trim_length| and |alignment|.
|
||||||
|
//
|
||||||
|
// On failure, on Windows, this function returns 0 and frees memory at
|
||||||
|
// |base_address|.
|
||||||
|
uintptr_t TrimMapping(uintptr_t base_address,
|
||||||
|
size_t base_length,
|
||||||
|
size_t trim_length,
|
||||||
|
uintptr_t alignment,
|
||||||
|
uintptr_t alignment_offset,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
PA_DCHECK(base_length >= trim_length);
|
||||||
|
PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
|
||||||
|
PA_DCHECK(alignment_offset < alignment);
|
||||||
|
uintptr_t new_base =
|
||||||
|
NextAlignedWithOffset(base_address, alignment, alignment_offset);
|
||||||
|
PA_DCHECK(new_base >= base_address);
|
||||||
|
size_t pre_slack = new_base - base_address;
|
||||||
|
size_t post_slack = base_length - pre_slack - trim_length;
|
||||||
|
PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
|
||||||
|
PA_DCHECK(pre_slack < base_length);
|
||||||
|
PA_DCHECK(post_slack < base_length);
|
||||||
|
return internal::TrimMappingInternal(base_address, base_length, trim_length,
|
||||||
|
accessibility, pre_slack, post_slack);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Align |address| up to the closest, non-smaller address, that gives
|
||||||
|
// |requested_offset| remainder modulo |alignment|.
|
||||||
|
//
|
||||||
|
// Examples for alignment=1024 and requested_offset=64:
|
||||||
|
// 64 -> 64
|
||||||
|
// 65 -> 1088
|
||||||
|
// 1024 -> 1088
|
||||||
|
// 1088 -> 1088
|
||||||
|
// 1089 -> 2112
|
||||||
|
// 2048 -> 2112
|
||||||
|
uintptr_t NextAlignedWithOffset(uintptr_t address,
|
||||||
|
uintptr_t alignment,
|
||||||
|
uintptr_t requested_offset) {
|
||||||
|
PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
|
||||||
|
PA_DCHECK(requested_offset < alignment);
|
||||||
|
|
||||||
|
uintptr_t actual_offset = address & (alignment - 1);
|
||||||
|
uintptr_t new_address;
|
||||||
|
if (actual_offset <= requested_offset)
|
||||||
|
new_address = address + requested_offset - actual_offset;
|
||||||
|
else
|
||||||
|
new_address = address + alignment + requested_offset - actual_offset;
|
||||||
|
PA_DCHECK(new_address >= address);
|
||||||
|
PA_DCHECK(new_address - address < alignment);
|
||||||
|
PA_DCHECK(new_address % alignment == requested_offset);
|
||||||
|
|
||||||
|
return new_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace internal {
|
||||||
|
|
||||||
|
uintptr_t SystemAllocPages(uintptr_t hint,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
uintptr_t ret =
|
||||||
|
internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
|
||||||
|
if (ret)
|
||||||
|
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace internal
|
||||||
|
|
||||||
|
uintptr_t AllocPages(size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
|
||||||
|
page_tag);
|
||||||
|
}
|
||||||
|
uintptr_t AllocPages(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
|
||||||
|
page_tag);
|
||||||
|
}
|
||||||
|
void* AllocPages(void* address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
return reinterpret_cast<void*>(
|
||||||
|
AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
|
||||||
|
accessibility, page_tag));
|
||||||
|
}
|
||||||
|
|
||||||
|
uintptr_t AllocPagesWithAlignOffset(
|
||||||
|
uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
size_t align,
|
||||||
|
size_t align_offset,
|
||||||
|
PageAccessibilityConfiguration accessibility,
|
||||||
|
PageTag page_tag) {
|
||||||
|
PA_DCHECK(length >= internal::PageAllocationGranularity());
|
||||||
|
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
PA_DCHECK(align >= internal::PageAllocationGranularity());
|
||||||
|
// Alignment must be power of 2 for masking math to work.
|
||||||
|
PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
|
||||||
|
PA_DCHECK(align_offset < align);
|
||||||
|
PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
uintptr_t align_offset_mask = align - 1;
|
||||||
|
uintptr_t align_base_mask = ~align_offset_mask;
|
||||||
|
PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
|
||||||
|
|
||||||
|
// If the client passed null as the address, choose a good one.
|
||||||
|
if (!address) {
|
||||||
|
address = (GetRandomPageBase() & align_base_mask) + align_offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
// First try to force an exact-size, aligned allocation from our random base.
|
||||||
|
#if defined(ARCH_CPU_32_BITS)
|
||||||
|
// On 32 bit systems, first try one random aligned address, and then try an
|
||||||
|
// aligned address derived from the value of |ret|.
|
||||||
|
constexpr int kExactSizeTries = 2;
|
||||||
|
#else
|
||||||
|
// On 64 bit systems, try 3 random aligned addresses.
|
||||||
|
constexpr int kExactSizeTries = 3;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
for (int i = 0; i < kExactSizeTries; ++i) {
|
||||||
|
uintptr_t ret =
|
||||||
|
AllocPagesIncludingReserved(address, length, accessibility, page_tag);
|
||||||
|
if (ret) {
|
||||||
|
// If the alignment is to our liking, we're done.
|
||||||
|
if ((ret & align_offset_mask) == align_offset)
|
||||||
|
return ret;
|
||||||
|
// Free the memory and try again.
|
||||||
|
FreePages(ret, length);
|
||||||
|
} else {
|
||||||
|
// |ret| is null; if this try was unhinted, we're OOM.
|
||||||
|
if (internal::kHintIsAdvisory || !address)
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(ARCH_CPU_32_BITS)
|
||||||
|
// For small address spaces, try the first aligned address >= |ret|. Note
|
||||||
|
// |ret| may be null, in which case |address| becomes null. If
|
||||||
|
// |align_offset| is non-zero, this calculation may get us not the first,
|
||||||
|
// but the next matching address.
|
||||||
|
address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
|
||||||
|
#else // defined(ARCH_CPU_64_BITS)
|
||||||
|
// Keep trying random addresses on systems that have a large address space.
|
||||||
|
address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a larger allocation so we can force alignment.
|
||||||
|
size_t try_length = length + (align - internal::PageAllocationGranularity());
|
||||||
|
PA_CHECK(try_length >= length);
|
||||||
|
uintptr_t ret;
|
||||||
|
|
||||||
|
do {
|
||||||
|
// Continue randomizing only on POSIX.
|
||||||
|
address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
|
||||||
|
ret = AllocPagesIncludingReserved(address, try_length, accessibility,
|
||||||
|
page_tag);
|
||||||
|
// The retries are for Windows, where a race can steal our mapping on
|
||||||
|
// resize.
|
||||||
|
} while (ret && (ret = TrimMapping(ret, try_length, length, align,
|
||||||
|
align_offset, accessibility)) == 0);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
void FreePages(uintptr_t address, size_t length) {
|
||||||
|
PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
|
||||||
|
internal::FreePagesInternal(address, length);
|
||||||
|
PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
|
||||||
|
g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
void FreePages(void* address, size_t length) {
|
||||||
|
FreePages(reinterpret_cast<uintptr_t>(address), length);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool TrySetSystemPagesAccess(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
return internal::TrySetSystemPagesAccessInternal(address, length,
|
||||||
|
accessibility);
|
||||||
|
}
|
||||||
|
bool TrySetSystemPagesAccess(void* address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
|
||||||
|
accessibility);
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetSystemPagesAccess(uintptr_t address,
|
||||||
|
size_t length,
|
||||||
|
PageAccessibilityConfiguration accessibility) {
|
||||||
|
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
|
||||||
|
internal::SetSystemPagesAccessInternal(address, length, accessibility);
|
||||||
|
}

void DecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DecommitSystemPagesInternal(address, length,
                                        accessibility_disposition);
}

void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
                      accessibility_disposition);
}

void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DecommitAndZeroSystemPagesInternal(address, length);
}

void DecommitAndZeroSystemPages(void* address, size_t length) {
  DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
}

void RecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
  internal::RecommitSystemPagesInternal(address, length, accessibility,
                                        accessibility_disposition);
}

bool TryRecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Duplicated because we want errors to be reported at a lower level in the
  // crashing case.
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
  return internal::TryRecommitSystemPagesInternal(
      address, length, accessibility, accessibility_disposition);
}

void DiscardSystemPages(uintptr_t address, size_t length) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DiscardSystemPagesInternal(address, length);
}

void DiscardSystemPages(void* address, size_t length) {
  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}

bool ReserveAddressSpace(size_t size) {
  // To avoid deadlock, call only SystemAllocPages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address) {
    uintptr_t mem = internal::SystemAllocPages(
        0, size, PageAccessibilityConfiguration::kInaccessible,
        PageTag::kChromium);
    if (mem) {
      // We guarantee this alignment when reserving address space.
      PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
      s_reservation_address = mem;
      s_reservation_size = size;
      return true;
    }
  }
  return false;
}

bool ReleaseReservation() {
  // To avoid deadlock, call only FreePages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address)
    return false;

  FreePages(s_reservation_address, s_reservation_size);
  s_reservation_address = 0;
  s_reservation_size = 0;
  return true;
}

bool HasReservationForTesting() {
  internal::ScopedGuard guard(GetReserveLock());
  return s_reservation_address;
}

uint32_t GetAllocPageErrorCode() {
  return internal::s_allocPageErrorCode;
}

size_t GetTotalMappedSize() {
  return g_total_mapped_address_space;
}

}  // namespace partition_alloc

363
src/base/allocator/partition_allocator/page_allocator.h
Normal file
@@ -0,0 +1,363 @@

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "build/build_config.h"

namespace partition_alloc {

enum class PageAccessibilityConfiguration {
  kInaccessible,
  kRead,
  kReadWrite,
  // This flag is mapped to kReadWrite on systems that don't support MTE.
  kReadWriteTagged,
  // This flag is mapped to kReadExecute on systems that don't support Arm's
  // BTI.
  kReadExecuteProtected,
  kReadExecute,
  // This flag is deprecated and will go away soon.
  // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
  kReadWriteExecute,
};

// Use for De/RecommitSystemPages API.
enum class PageAccessibilityDisposition {
  // Enforces permission update (Decommit will set to
  // PageAccessibilityConfiguration::kInaccessible;
  // Recommit will set to whatever was requested, other than
  // PageAccessibilityConfiguration::kInaccessible).
  kRequireUpdate,
  // Will not update permissions, if the platform supports that (POSIX &
  // Fuchsia only).
  kAllowKeepForPerf,
};

// macOS supports tagged memory regions, to help in debugging. On Android,
// these tags are used to name anonymous mappings.
enum class PageTag {
  kFirst = 240,           // Minimum tag value.
  kBlinkGC = 252,         // Blink GC pages.
  kPartitionAlloc = 253,  // PartitionAlloc, no matter the partition.
  kChromium = 254,        // Chromium page.
  kV8 = 255,              // V8 heap pages.
  kLast = kV8             // Maximum tag value.
};

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t NextAlignedWithOffset(uintptr_t ptr,
                                uintptr_t alignment,
                                uintptr_t requested_offset);

// Allocates one or more pages.
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned to |align_offset| modulo
// |align| bytes.
//
// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
// |align_offset| must be less than |align|. |align| must be a power of two.
//
// If |address| is 0/nullptr, then a suitable and randomized address will be
// chosen automatically.
//
// |accessibility| controls the permission of the allocated pages.
// PageAccessibilityConfiguration::kInaccessible means uncommitted.
//
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// This call will return 0/nullptr if the allocation cannot be satisfied.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(uintptr_t address,
                     size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration accessibility,
                 PageTag page_tag);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPagesWithAlignOffset(
    uintptr_t address,
    size_t length,
    size_t align,
    size_t align_offset,
    PageAccessibilityConfiguration page_accessibility,
    PageTag page_tag);

// Frees one or more pages starting at |address| and continuing for |length|
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|.
// Therefore, |address| must be aligned to |PageAllocationGranularity()| bytes,
// and |length| must be a multiple of |PageAllocationGranularity()|.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void FreePages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void FreePages(void* address, size_t length);
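
// Example: a minimal allocate/free round trip through this API. This is an
// illustrative sketch only, not part of the original header; the function
// name and the four-granule size are hypothetical.
inline bool AllocAndFreeExampleForIllustration() {
  const size_t length = 4 * internal::PageAllocationGranularity();
  uintptr_t region = AllocPages(length, internal::PageAllocationGranularity(),
                                PageAccessibilityConfiguration::kReadWrite,
                                PageTag::kChromium);
  if (!region)
    return false;  // The hint-free overload returns 0 on failure.
  // |address| and |length| must exactly match the AllocPages() call.
  FreePages(region, length);
  return true;
}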

// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility);
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TrySetSystemPagesAccess(
    void* address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility);

// Marks one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Performs a CHECK that the operation succeeds.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(uintptr_t address,
                          size_t length,
                          PageAccessibilityConfiguration page_accessibility);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetSystemPagesAccess(void* address,
                          size_t length,
                          PageAccessibilityConfiguration page_accessibility);

// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate
// (recommended), the decommitted pages will be made inaccessible before the
// call returns. While it is always a programming error to access decommitted
// pages without first recommitting them, callers may use
// PageAccessibilityDisposition::kAllowKeepForPerf to allow the implementation
// to skip changing permissions (use with care), for performance reasons (see
// crrev.com/c/2567282 and crrev.com/c/2563038 for perf regressions encountered
// in the past). Implementations may choose to always modify permissions, hence
// accessing those pages may or may not trigger a fault.
//
// Decommitting means that physical resources (RAM or swap/pagefile) backing
// the allocated virtual address range may be released back to the system, but
// the address space is still allocated to the process (possibly using up page
// table entries or other accounting resources). There is no guarantee that the
// pages are zeroed, unless |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
//
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
// processes will not fault when touching a committed memory region. There is
// no analogue in the POSIX & Fuchsia memory API, where virtual memory pages
// are best-effort allocated resources on first touch. If the
// PageAccessibilityDisposition::kRequireUpdate disposition is used, this API
// behaves in a platform-agnostic way by simulating the Windows "decommit"
// state by both discarding the region (allowing the OS to avoid swap
// operations) *and* changing the page protections so accesses fault.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition);

// Decommits one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
// zeroed and will always mark the region as inaccessible (the equivalent of
// setting them to PageAccessibilityConfiguration::kInaccessible).
//
// This API will crash if the operation cannot be performed.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitAndZeroSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DecommitAndZeroSystemPages(void* address, size_t length);

// Whether decommitted memory is guaranteed to be zeroed when it is
// recommitted. Do not assume that this will not change over time.
constexpr PA_COMPONENT_EXPORT(
    PARTITION_ALLOC) bool DecommittedMemoryIsAlwaysZeroed() {
#if BUILDFLAG(IS_APPLE)
  return false;
#else
  return true;
#endif
}

// (Re)Commits one or more system pages, starting at |address| and continuing
// for |length| bytes with the given |page_accessibility| (must not be
// PageAccessibilityConfiguration::kInaccessible). |address| and |length|
// must be aligned to a system page boundary.
//
// This API will crash if the operation cannot be performed!
//
// If disposition is PageAccessibilityDisposition::kRequireUpdate, the call
// updates the pages to |page_accessibility|. This can be used regardless of
// what disposition was used to decommit the pages.
// PageAccessibilityDisposition::kAllowKeepForPerf allows the implementation
// to leave the page permissions, if that improves performance. This option can
// only be used if the pages were previously accessible and decommitted with
// that same option.
//
// The memory will be zeroed when it is committed for the first time. However,
// there is no such guarantee when memory is recommitted, unless
// |DecommittedMemoryIsAlwaysZeroed()| is true.
//
// This operation may not be atomic on some platforms.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void RecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility,
    PageAccessibilityDisposition accessibility_disposition);

// Like RecommitSystemPages(), but returns false instead of crashing.
[[nodiscard]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool TryRecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility,
    PageAccessibilityDisposition accessibility_disposition);
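
// Example: a decommit/recommit round trip with the recommended disposition.
// This sketch is illustrative and not part of the original header; |region|
// and |length| are hypothetical, already allocated, and system-page-aligned.
inline void DecommitRecommitExampleForIllustration(uintptr_t region,
                                                   size_t length) {
  // Release the physical backing; the pages become inaccessible.
  DecommitSystemPages(region, length,
                      PageAccessibilityDisposition::kRequireUpdate);
  // Bring the pages back as read/write before touching them again. Contents
  // are only guaranteed to be zero if DecommittedMemoryIsAlwaysZeroed().
  RecommitSystemPages(region, length,
                      PageAccessibilityConfiguration::kReadWrite,
                      PageAccessibilityDisposition::kRequireUpdate);
}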

// Discards one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
//   - Do nothing.
//   - Discard the page immediately, freeing up physical pages.
//   - Discard the page at some time in the future in response to memory
//     pressure.
//
// Only committed pages should be discarded. Discarding a page does not
// decommit it, and it is valid to discard an already-discarded page. A read or
// write to a discarded page will not fault.
//
// Reading from a discarded page may return the original page content, or a
// page full of zeroes.
//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(uintptr_t address, size_t length);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DiscardSystemPages(void* address, size_t length);
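
// Example: hinting that a page-aligned buffer can be reclaimed while keeping
// it committed. Illustrative only; |buffer| and |length| are hypothetical and
// must satisfy the constraints documented above.
inline void DiscardAndRevalidateExampleForIllustration(uintptr_t buffer,
                                                       size_t length) {
  DiscardSystemPages(buffer, length);
  // Later: writing is the only guaranteed way to make the content stable
  // again. After this, the buffer must be treated as reinitialized.
  auto* bytes = reinterpret_cast<volatile unsigned char*>(buffer);
  for (size_t i = 0; i < length; ++i)
    bytes[i] = 0;
}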

// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
  return (address + internal::SystemPageOffsetMask()) &
         internal::SystemPageBaseMask();
}

// Rounds down |address| to the previous multiple of |SystemPageSize()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
  return address & internal::SystemPageBaseMask();
}

// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
  return (address + internal::PageAllocationGranularityOffsetMask()) &
         internal::PageAllocationGranularityBaseMask();
}

// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
  return address & internal::PageAllocationGranularityBaseMask();
}
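
// Worked example of the rounding arithmetic above, assuming a hypothetical
// 4 KiB system page (SystemPageOffsetMask() == 0xfff):
//   RoundUpToSystemPage(1)      == (1 + 0xfff) & ~0xfff    == 4096
//   RoundUpToSystemPage(4096)   == (4096 + 0xfff) & ~0xfff == 4096
//   RoundDownToSystemPage(4097) == 4097 & ~0xfff           == 4096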

// Reserves (at least) |size| bytes of address space, aligned to
// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already
// made.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReserveAddressSpace(size_t size);

// Releases any reserved address space. |AllocPages| calls this automatically
// on an allocation failure. External allocators may also call this on failure.
//
// Returns true when an existing reservation was released.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool ReleaseReservation();
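
// Example: setting up an early reservation so that a later out-of-address-
// space condition can be relieved by releasing it. Illustrative sketch only;
// the 512 MiB size is a hypothetical value, not one used by the allocator.
inline void EarlyReservationExampleForIllustration() {
  // Done once at startup; fails harmlessly if a reservation already exists.
  ReserveAddressSpace(size_t{512} * 1024 * 1024);
  // ... much later, on allocation failure, AllocPages() itself calls
  // ReleaseReservation() to free the reserved range and retry.
}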

// Returns true if there is currently an address space reservation.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool HasReservationForTesting();

// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when
// |mmap| (POSIX) or |VirtualAlloc| (Windows) fails.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) uint32_t GetAllocPageErrorCode();

// Returns the total amount of mapped pages from all clients of
// PageAllocator. These pages may or may not be committed. This is mostly
// useful to assess address space pressure.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();

}  // namespace partition_alloc

namespace base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::AllocPages;
using ::partition_alloc::AllocPagesWithAlignOffset;
using ::partition_alloc::DecommitAndZeroSystemPages;
using ::partition_alloc::DecommitSystemPages;
using ::partition_alloc::DecommittedMemoryIsAlwaysZeroed;
using ::partition_alloc::DiscardSystemPages;
using ::partition_alloc::FreePages;
using ::partition_alloc::GetAllocPageErrorCode;
using ::partition_alloc::GetTotalMappedSize;
using ::partition_alloc::HasReservationForTesting;
using ::partition_alloc::NextAlignedWithOffset;
using ::partition_alloc::PageAccessibilityConfiguration;
using ::partition_alloc::PageAccessibilityDisposition;
using ::partition_alloc::PageTag;
using ::partition_alloc::RecommitSystemPages;
using ::partition_alloc::ReleaseReservation;
using ::partition_alloc::ReserveAddressSpace;
using ::partition_alloc::RoundDownToPageAllocationGranularity;
using ::partition_alloc::RoundDownToSystemPage;
using ::partition_alloc::RoundUpToPageAllocationGranularity;
using ::partition_alloc::RoundUpToSystemPage;
using ::partition_alloc::SetSystemPagesAccess;
using ::partition_alloc::TryRecommitSystemPages;
using ::partition_alloc::TrySetSystemPagesAccess;

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

166
src/base/allocator/partition_allocator/page_allocator_constants.h
Normal file
@@ -0,0 +1,166 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

#include <stddef.h>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)

#include <mach/vm_page_size.h>

// Although page allocator constants are not constexpr, they are run-time
// constant. Because the underlying variables they access, such as
// vm_page_size, are not marked const, the compiler normally has no way to know
// that they don't change and must obtain their values whenever it can't prove
// that they haven't been modified, even if they had already been obtained
// previously. Attaching __attribute__((const)) to these declarations allows
// these redundant accesses to be omitted under optimization such as common
// subexpression elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))

#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
// atomics (such as macOS on arm64).

// Page allocator constants are run-time constant.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))

#include <unistd.h>
#include <atomic>

namespace partition_alloc::internal {

// Holds the current page size and shift, where size = 1 << shift.
// Use PageAllocationGranularity() and PageAllocationGranularityShift()
// to initialize and retrieve these values safely.
struct PageCharacteristics {
  std::atomic<int> size;
  std::atomic<int> shift;
};
extern PageCharacteristics page_characteristics;

}  // namespace partition_alloc::internal

#else

// When defined, page size constants are fixed at compile time. When not
// defined, they may vary at run time.
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1

// Use this macro to declare a function as constexpr or not based on whether
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr

#endif

namespace partition_alloc::internal {

// Forward declaration, implementation below.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity();

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
  // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
  // sizes. Since 64kB is the de facto standard on the platform and binaries
  // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
  // here.
  return 16;  // 64kB
#elif defined(_MIPS_ARCH_LOONGSON)
  return 14;  // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
  return vm_page_shift;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // arm64 supports 4kB (shift = 12), 16kB (shift = 14), and 64kB (shift = 16)
  // page sizes. Retrieve from or initialize cache.
  int shift = page_characteristics.shift.load(std::memory_order_relaxed);
  if (PA_UNLIKELY(shift == 0)) {
    shift = __builtin_ctz((int)PageAllocationGranularity());
    page_characteristics.shift.store(shift, std::memory_order_relaxed);
  }
  return shift;
#else
  return 12;  // 4kB
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
  // This is literally equivalent to |1 << PageAllocationGranularityShift()|
  // below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
  return vm_page_size;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // arm64 supports 4kB, 16kB, and 64kB page sizes. Retrieve from or
  // initialize cache.
  int size = page_characteristics.size.load(std::memory_order_relaxed);
  if (PA_UNLIKELY(size == 0)) {
    size = getpagesize();
    page_characteristics.size.store(size, std::memory_order_relaxed);
  }
  return size;
#else
  return 1 << PageAllocationGranularityShift();
#endif
}
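
// Worked example of the cache above, assuming a hypothetical arm64 Linux
// machine with 16 KiB pages: the first call stores size == 16384 and
// shift == __builtin_ctz(16384) == 14 into |page_characteristics|; every
// later call is a single relaxed atomic load, and
// PageAllocationGranularity() == size_t{1} << PageAllocationGranularityShift().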

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
  return PageAllocationGranularity() - 1;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
  return ~PageAllocationGranularityOffsetMask();
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageShift() {
  // On Windows allocation granularity is higher than the page size. This comes
  // into play when reserving address space range (allocation granularity),
  // compared to committing pages into memory (system page granularity).
#if BUILDFLAG(IS_WIN)
  return 12;  // 4096 = 1 << 12
#else
  return PageAllocationGranularityShift();
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageSize() {
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
  // This is literally equivalent to |1 << SystemPageShift()| below, but was
  // separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
  // non-constexpr.
  return PageAllocationGranularity();
#else
  return 1 << SystemPageShift();
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageOffsetMask() {
  return SystemPageSize() - 1;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageBaseMask() {
  return ~SystemPageOffsetMask();
}

constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

22
src/base/allocator/partition_allocator/page_allocator_internal.h
Normal file
@@ -0,0 +1,22 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/page_allocator.h"

namespace partition_alloc::internal {

uintptr_t SystemAllocPages(uintptr_t hint,
                           size_t length,
                           PageAccessibilityConfiguration accessibility,
                           PageTag page_tag);

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

238
src/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
Normal file
@@ -0,0 +1,238 @@

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file implements memory allocation primitives for PageAllocator using
// Fuchsia's VMOs (Virtual Memory Objects). The VMO API is documented in
// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
// object that corresponds to a set of memory pages. VMO pages may be mapped
// to an address space. The code below creates a VMO for each memory allocation
// and maps it to the default address space of the current process.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>

#include <atomic>
#include <cstdint>
#include <cstring>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/fuchsia/fuchsia_logging.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"

namespace partition_alloc::internal {

namespace {

// Returns the VMO name for a PageTag.
const char* PageTagToName(PageTag tag) {
  switch (tag) {
    case PageTag::kBlinkGC:
      return "cr_blink_gc";
    case PageTag::kPartitionAlloc:
      return "cr_partition_alloc";
    case PageTag::kChromium:
      return "cr_chromium";
    case PageTag::kV8:
      return "cr_v8";
    default:
      PA_DCHECK(false);
      return "";
  }
}

zx_vm_option_t PageAccessibilityToZxVmOptions(
    PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageAccessibilityConfiguration::kRead:
      return ZX_VM_PERM_READ;
    case PageAccessibilityConfiguration::kReadWrite:
    case PageAccessibilityConfiguration::kReadWriteTagged:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    case PageAccessibilityConfiguration::kReadExecuteProtected:
    case PageAccessibilityConfiguration::kReadExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
    case PageAccessibilityConfiguration::kReadWriteExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
    default:
      PA_NOTREACHED();
      [[fallthrough]];
    case PageAccessibilityConfiguration::kInaccessible:
      return 0;
  }
}

}  // namespace

// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;

std::atomic<int32_t> s_allocPageErrorCode{0};

uintptr_t SystemAllocPagesInternal(uintptr_t hint,
                                   size_t length,
                                   PageAccessibilityConfiguration accessibility,
                                   PageTag page_tag) {
  zx::vmo vmo;
  zx_status_t status = zx::vmo::create(length, 0, &vmo);
  if (status != ZX_OK) {
    PA_ZX_DLOG(INFO, status) << "zx_vmo_create";
    return 0;
  }

  const char* vmo_name = PageTagToName(page_tag);
  status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));

  // VMO names are used only for debugging, so failure to set a name is not
  // fatal.
  PA_ZX_DCHECK(status == ZX_OK, status);

  if (page_tag == PageTag::kV8) {
    // V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
    // in the new VMO.
    status = vmo.replace_as_executable(zx::resource(), &vmo);
    if (status != ZX_OK) {
      PA_ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
      return 0;
    }
  }

  zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);

  uint64_t vmar_offset = 0;
  if (hint) {
    vmar_offset = hint;
    options |= ZX_VM_SPECIFIC;
  }

  uint64_t address;
  status =
      zx::vmar::root_self()->map(options, vmar_offset, vmo,
                                 /*vmo_offset=*/0, length, &address);
  if (status != ZX_OK) {
    // map() is expected to fail if |hint| is set to an already-in-use location.
    if (!hint) {
      PA_ZX_DLOG(ERROR, status) << "zx_vmar_map";
    }
    return 0;
  }

  return address;
}

uintptr_t TrimMappingInternal(uintptr_t base_address,
                              size_t base_length,
                              size_t trim_length,
                              PageAccessibilityConfiguration accessibility,
                              size_t pre_slack,
                              size_t post_slack) {
  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);

  // Unmap head if necessary.
  if (pre_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
    PA_ZX_CHECK(status == ZX_OK, status);
  }

  // Unmap tail if necessary.
  if (post_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(
        base_address + pre_slack + trim_length, post_slack);
    PA_ZX_CHECK(status == ZX_OK, status);
  }

  return base_address + pre_slack;
}

bool TrySetSystemPagesAccessInternal(
    uint64_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect(
      PageAccessibilityToZxVmOptions(accessibility), address, length);
  return status == ZX_OK;
}

void SetSystemPagesAccessInternal(
    uint64_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect(
      PageAccessibilityToZxVmOptions(accessibility), address, length);
  PA_ZX_CHECK(status == ZX_OK, status);
}

void FreePagesInternal(uint64_t address, size_t length) {
  zx_status_t status = zx::vmar::root_self()->unmap(address, length);
  PA_ZX_CHECK(status == ZX_OK, status);
}

void DiscardSystemPagesInternal(uint64_t address, size_t length) {
  // TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
  // forcibly de-committing them immediately, when Fuchsia supports it.
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
  PA_ZX_CHECK(status == ZX_OK, status);
}

void DecommitSystemPagesInternal(
    uint64_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  if (accessibility_disposition ==
      PageAccessibilityDisposition::kRequireUpdate) {
    SetSystemPagesAccess(address, length,
                         PageAccessibilityConfiguration::kInaccessible);
  }

  // TODO(https://crbug.com/1022062): Review whether this implementation is
  // still appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
  // discardable API.
  DiscardSystemPagesInternal(address, length);
}

void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
  SetSystemPagesAccess(address, length,
                       PageAccessibilityConfiguration::kInaccessible);

  // TODO(https://crbug.com/1022062): this implementation will likely no longer
  // be appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
  // discardable API.
  DiscardSystemPagesInternal(address, length);
}

void RecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition ==
      PageAccessibilityDisposition::kRequireUpdate) {
    SetSystemPagesAccess(address, length, accessibility);
  }
}

bool TryRecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition ==
      PageAccessibilityDisposition::kRequireUpdate) {
    return TrySetSystemPagesAccess(address, length, accessibility);
  }
  return true;
}

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

46
src/base/allocator/partition_allocator/page_allocator_internals_posix.cc
Normal file
@@ -0,0 +1,46 @@

// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"

#include <sys/mman.h>

// PA_PROT_BTI requests a page that supports BTI landing pads.
#define PA_PROT_BTI 0x10
// PA_PROT_MTE requests a page that's suitable for memory tagging.
#define PA_PROT_MTE 0x20

namespace partition_alloc::internal {

int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageAccessibilityConfiguration::kRead:
      return PROT_READ;
    case PageAccessibilityConfiguration::kReadWriteTagged:
#if defined(ARCH_CPU_ARM64)
      return PROT_READ | PROT_WRITE |
             (base::CPU::GetInstanceNoAllocation().has_mte() ? PA_PROT_MTE
                                                             : 0);
#else
      [[fallthrough]];
#endif
    case PageAccessibilityConfiguration::kReadWrite:
      return PROT_READ | PROT_WRITE;
    case PageAccessibilityConfiguration::kReadExecuteProtected:
      return PROT_READ | PROT_EXEC |
             (base::CPU::GetInstanceNoAllocation().has_bti() ? PA_PROT_BTI
                                                             : 0);
    case PageAccessibilityConfiguration::kReadExecute:
      return PROT_READ | PROT_EXEC;
    case PageAccessibilityConfiguration::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    default:
      PA_NOTREACHED();
      [[fallthrough]];
    case PageAccessibilityConfiguration::kInaccessible:
      return PROT_NONE;
  }
}

}  // namespace partition_alloc::internal

370
src/base/allocator/partition_allocator/page_allocator_internals_posix.h
Normal file
@@ -0,0 +1,370 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <cstring>

#include <sys/mman.h>

#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h"

#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if BUILDFLAG(IS_ANDROID)
#include <sys/prctl.h>
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <sys/resource.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#if BUILDFLAG(IS_MAC)

// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It has, in fact, been
// present on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task) API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop

#endif  // BUILDFLAG(IS_MAC)

namespace partition_alloc::internal {

namespace {

#if BUILDFLAG(IS_ANDROID)
const char* PageTagToName(PageTag tag) {
  // Important: All the names should be string literals. As per prctl.h in
  // //third_party/android_ndk the kernel keeps a pointer to the name instead
  // of copying it.
  //
  // Having the name in .rodata ensures that the pointer remains valid as
  // long as the mapping is alive.
  switch (tag) {
    case PageTag::kBlinkGC:
      return "blink_gc";
    case PageTag::kPartitionAlloc:
      return "partition_alloc";
    case PageTag::kChromium:
      return "chromium";
    case PageTag::kV8:
      return "v8";
    default:
      PA_DCHECK(false);
      return "";
  }
}
#endif  // BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(IS_MAC)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
  if (!base::mac::IsAtLeastOS10_14()) {
    // MAP_JIT existed before macOS 10.14, but had somewhat different
    // semantics. Only one MAP_JIT region was permitted per process, but
    // calling code here will very likely require more than one such region.
    // Since MAP_JIT is not strictly necessary to write code to a region and
    // then execute it on these older OSes, don't use it at all.
    return false;
  }

  // Until determining that the hardened runtime is enabled, early returns will
  // return true, so that MAP_JIT will be used. This is important on arm64,
  // which only allows pages to be simultaneously writable and executable when
  // in a region allocated with MAP_JIT, regardless of code signing options. On
  // arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
  // executable fails with EPERM. Although this is not enforced on x86_64,
  // MAP_JIT is harmless in that case.

  base::ScopedCFTypeRef<SecTaskRef> task(
      SecTaskCreateFromSelf(kCFAllocatorDefault));
  if (!task) {
    return true;
  }

  uint32_t flags = SecTaskGetCodeSignStatus(task);
  if (!(flags & kSecCodeSignatureRuntime)) {
    // The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
    // == CS_RUNTIME.
    return true;
  }

  // The hardened runtime is enabled. From this point on, early returns must
  // return false, indicating that MAP_JIT is not to be used. It's an error
  // (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
  // entitlement is specified.

  base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
      SecTaskCopyValueForEntitlement(
          task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
  if (!jit_entitlement)
    return false;

  return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
         kCFBooleanTrue;
}
#endif  // BUILDFLAG(IS_MAC)

}  // namespace

// |mmap| uses a nearby address if the hint address is blocked, i.e. the hint
// is advisory.
constexpr bool kHintIsAdvisory = true;

std::atomic<int32_t> s_allocPageErrorCode{0};

int GetAccessFlags(PageAccessibilityConfiguration accessibility);

uintptr_t SystemAllocPagesInternal(uintptr_t hint,
                                   size_t length,
                                   PageAccessibilityConfiguration accessibility,
                                   PageTag page_tag) {
#if BUILDFLAG(IS_APPLE)
  // Use a custom tag to make it easier to distinguish Partition Alloc regions
  // in vmmap(1). Tags between 240-255 are supported.
  PA_DCHECK(PageTag::kFirst <= page_tag);
  PA_DCHECK(PageTag::kLast >= page_tag);
  int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
  int fd = -1;
#endif

  int access_flag = GetAccessFlags(accessibility);
  int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;

#if BUILDFLAG(IS_MAC)
  // On macOS 10.14 and higher, executables that are code signed with the
  // "runtime" option cannot execute writable memory by default. They can opt
  // into this capability by specifying the "com.apple.security.cs.allow-jit"
  // code signing entitlement and allocating the region with the MAP_JIT flag.
  static const bool kUseMapJit = UseMapJit();
  if (page_tag == PageTag::kV8 && kUseMapJit) {
    map_flags |= MAP_JIT;
  }
#endif

  void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
                   map_flags, fd, 0);
  if (ret == MAP_FAILED) {
    s_allocPageErrorCode = errno;
    ret = nullptr;
  }

#if BUILDFLAG(IS_ANDROID)
  // On Android, anonymous mappings can have a name attached to them. This is
  // useful for debugging, and double-checking memory attribution.
  if (ret) {
    // No error checking on purpose, testing only.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
          PageTagToName(page_tag));
  }
#endif

  return reinterpret_cast<uintptr_t>(ret);
}

bool TrySetSystemPagesAccessInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
                                       GetAccessFlags(accessibility)));
}
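
// PA_HANDLE_EINTR (from partition_alloc_base/posix/eintr_wrapper.h) retries a
// syscall while it fails with EINTR. A minimal sketch of the idiom it expands
// to, shown here purely for illustration:
//
//   int rc;
//   do {
//     rc = mprotect(ptr, length, flags);
//   } while (rc == -1 && errno == EINTR);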

void SetSystemPagesAccessInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  int access_flags = GetAccessFlags(accessibility);
  const int ret = PA_HANDLE_EINTR(
      mprotect(reinterpret_cast<void*>(address), length, access_flags));

  // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
  // kernel data structures cannot be allocated, (2) the address range is
  // invalid, or (3) this would split an existing mapping in a way that would
  // exceed the maximum number of allowed mappings.
  //
  // None of these is very likely, but we still get a lot of crashes here. This
  // is because setrlimit(RLIMIT_DATA)'s limit is checked and enforced here, if
  // the access flags match a "data" mapping, which in our case would be
  // MAP_PRIVATE | MAP_ANONYMOUS, and PROT_WRITE. See the call to
  // may_expand_vm() in mm/mprotect.c in the kernel for details.
  //
  // In this case, we are almost certainly bumping into the sandbox limit, so
  // mark the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
    OOM_CRASH(length);

  PA_PCHECK(0 == ret);
}

void FreePagesInternal(uintptr_t address, size_t length) {
  PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
}

uintptr_t TrimMappingInternal(uintptr_t base_address,
                              size_t base_length,
                              size_t trim_length,
                              PageAccessibilityConfiguration accessibility,
                              size_t pre_slack,
                              size_t post_slack) {
  uintptr_t ret = base_address;
  // We can resize the allocation run. Release unneeded memory before and after
  // the aligned range.
  if (pre_slack) {
    FreePages(base_address, pre_slack);
    ret = base_address + pre_slack;
  }
  if (post_slack) {
    FreePages(ret + trim_length, post_slack);
  }
  return ret;
}

void DecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // In POSIX, there is no decommit concept. Discarding is an effective way of
  // implementing the Windows semantics where the OS is allowed to not swap the
  // pages in the region.
  DiscardSystemPages(address, length);

  bool change_permissions =
      accessibility_disposition == PageAccessibilityDisposition::kRequireUpdate;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Zeroing after decommit is not guaranteed; show that we're serious.
  //
  // More specifically, several callers have had issues with assuming that
  // memory is zeroed; this would hopefully make these bugs more visible. We
  // don't memset() everything, because ranges can be very large, and doing it
  // over the entire range could make Chrome unusable with
  // BUILDFLAG(PA_DCHECK_IS_ON).
  //
  // Only do it when we are about to change the permissions, since we don't
  // know the previous permissions, and cannot restore them.
  if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
    // Memory may not be writable.
    size_t size = std::min(length, 2 * SystemPageSize());
    void* ptr = reinterpret_cast<void*>(address);
    PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
    memset(ptr, 0xcc, size);
  }
#endif

  // Make pages inaccessible, unless the caller requested to keep permissions.
  //
  // Note, there is a small window between these calls when the pages can be
  // incorrectly touched and brought back to memory. Not ideal, but doing those
  // operations in the opposite order resulted in a PMF regression on Mac (see
  // crbug.com/1153021).
  if (change_permissions) {
    SetSystemPagesAccess(address, length,
                         PageAccessibilityConfiguration::kInaccessible);
  }
}

void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
  // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
  // a MAP_FIXED request is successful, then any previous mappings [...] for
  // those whole pages containing any part of the address range [pa,pa+len)
  // shall be removed, as if by an appropriate call to munmap(), before the
  // new mapping is established." As a consequence, the memory will be
  // zero-initialized on next access.
  void* ptr = reinterpret_cast<void*>(address);
  void* ret = mmap(ptr, length, PROT_NONE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  PA_CHECK(ptr == ret);
}
|
||||||
|
|
||||||
|
void RecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition ==
      PageAccessibilityDisposition::kRequireUpdate) {
    SetSystemPagesAccess(address, length, accessibility);
  }

#if BUILDFLAG(IS_APPLE)
  // On macOS, to update accounting, we need to make another syscall. For more
  // details, see https://crbug.com/823915.
  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif
}

bool TryRecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition ==
      PageAccessibilityDisposition::kRequireUpdate) {
    bool ok = TrySetSystemPagesAccess(address, length, accessibility);
    if (!ok)
      return false;
  }

#if BUILDFLAG(IS_APPLE)
  // On macOS, to update accounting, we need to make another syscall. For more
  // details, see https://crbug.com/823915.
  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
#endif

  return true;
}

void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
  void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE)
  int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
  if (ret) {
    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
    ret = madvise(ptr, length, MADV_DONTNEED);
  }
  PA_PCHECK(ret == 0);
#else
  // We have experimented with other flags, but with suboptimal results.
  //
  // MADV_FREE (Linux): Makes our memory measurements less predictable;
  // performance benefits unclear.
  //
  // Therefore, we just do the simple thing: MADV_DONTNEED.
  PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
#endif
}

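// Editor's note: an illustrative sketch of what DiscardSystemPagesInternal()
// means for callers on Linux. MADV_DONTNEED drops the backing pages of an
// anonymous mapping; the address range stays valid, and the next read faults
// in fresh zero-filled pages. The demo function name is an assumption.
#if 0  // Illustrative only; not compiled.
#include <sys/mman.h>
#include <cassert>
#include <cstring>

static void DemoDiscard(void* page, size_t page_size) {
  memset(page, 0xab, page_size);            // Commit and dirty the page.
  madvise(page, page_size, MADV_DONTNEED);  // Hint: contents no longer needed.
  // The mapping is still readable and writable, but on anonymous memory the
  // old contents are gone; the kernel supplies zeroes on the next access.
  assert(static_cast<unsigned char*>(page)[0] == 0);
}
#endif
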
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
@ -0,0 +1,199 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_

#include <versionhelpers.h>

#include <cstdint>

#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"

namespace partition_alloc::internal {

namespace {

// On Windows, discarded pages are not returned to the system immediately and
// not guaranteed to be zeroed when returned to the application.
using DiscardVirtualMemoryFunction = DWORD(WINAPI*)(PVOID virtualAddress,
                                                    SIZE_T size);
DiscardVirtualMemoryFunction s_discard_virtual_memory =
    reinterpret_cast<DiscardVirtualMemoryFunction>(-1);

}  // namespace

// |VirtualAlloc| will fail if allocation at the hint address is blocked.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};

int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageAccessibilityConfiguration::kRead:
      return PAGE_READONLY;
    case PageAccessibilityConfiguration::kReadWrite:
    case PageAccessibilityConfiguration::kReadWriteTagged:
      return PAGE_READWRITE;
    case PageAccessibilityConfiguration::kReadExecute:
    case PageAccessibilityConfiguration::kReadExecuteProtected:
      return PAGE_EXECUTE_READ;
    case PageAccessibilityConfiguration::kReadWriteExecute:
      return PAGE_EXECUTE_READWRITE;
    default:
      PA_NOTREACHED();
      [[fallthrough]];
    case PageAccessibilityConfiguration::kInaccessible:
      return PAGE_NOACCESS;
  }
}

uintptr_t SystemAllocPagesInternal(uintptr_t hint,
                                   size_t length,
                                   PageAccessibilityConfiguration accessibility,
                                   PageTag page_tag) {
  DWORD access_flag = GetAccessFlags(accessibility);
  const DWORD type_flags =
      (accessibility != PageAccessibilityConfiguration::kInaccessible)
          ? (MEM_RESERVE | MEM_COMMIT)
          : MEM_RESERVE;
  void* ret = VirtualAlloc(reinterpret_cast<void*>(hint), length, type_flags,
                           access_flag);
  if (ret == nullptr) {
    s_allocPageErrorCode = GetLastError();
  }
  return reinterpret_cast<uintptr_t>(ret);
}

uintptr_t TrimMappingInternal(uintptr_t base_address,
                              size_t base_length,
                              size_t trim_length,
                              PageAccessibilityConfiguration accessibility,
                              size_t pre_slack,
                              size_t post_slack) {
  uintptr_t ret = base_address;
  if (pre_slack || post_slack) {
    // We cannot resize the allocation run. Free it and retry at the aligned
    // address within the freed range.
    ret = base_address + pre_slack;
    FreePages(base_address, base_length);
    ret = SystemAllocPages(ret, trim_length, accessibility,
                           PageTag::kChromium);
  }
  return ret;
}

bool TrySetSystemPagesAccessInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  void* ptr = reinterpret_cast<void*>(address);
  if (accessibility == PageAccessibilityConfiguration::kInaccessible)
    return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
  return nullptr !=
         VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility));
}

void SetSystemPagesAccessInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  void* ptr = reinterpret_cast<void*>(address);
  if (accessibility == PageAccessibilityConfiguration::kInaccessible) {
    if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
    }
  } else {
    if (!VirtualAlloc(ptr, length, MEM_COMMIT,
                      GetAccessFlags(accessibility))) {
      int32_t error = GetLastError();
      if (error == ERROR_COMMITMENT_LIMIT)
        OOM_CRASH(length);
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(ERROR_SUCCESS == error);
    }
  }
}

void FreePagesInternal(uintptr_t address, size_t length) {
  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
}

void DecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  SetSystemPagesAccess(address, length,
                       PageAccessibilityConfiguration::kInaccessible);
}

void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
  // "If a page is decommitted but not released, its state changes to reserved.
  // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
  // release it. Attempts to read from or write to a reserved page results in
  // an access violation exception."
  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
  // for MEM_COMMIT: "The function also guarantees that when the caller later
  // initially accesses the memory, the contents will be zero."
  PA_CHECK(
      VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
}

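// Editor's note: an illustrative sketch of the decommit/recommit round trip
// documented above, assuming the quoted VirtualFree/VirtualAlloc guarantees.
// The demo function name is an assumption, not part of the imported source.
#if 0  // Illustrative only; not compiled.
#include <windows.h>
#include <cassert>
#include <cstring>

static void DemoDecommitRecommit(void* page, size_t page_size) {
  memset(page, 0xff, page_size);  // Committed state, dirty contents.
  // Decommit: the state becomes "reserved"; touching it now would fault.
  BOOL ok = VirtualFree(page, page_size, MEM_DECOMMIT);
  assert(ok);
  // Recommit: MEM_COMMIT guarantees demand-zero contents on first access.
  void* ret = VirtualAlloc(page, page_size, MEM_COMMIT, PAGE_READWRITE);
  assert(ret == page);
  assert(static_cast<unsigned char*>(page)[0] == 0);
}
#endif
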
void RecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  SetSystemPagesAccess(address, length, accessibility);
}

bool TryRecommitSystemPagesInternal(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  return TrySetSystemPagesAccess(address, length, accessibility);
}

void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
  if (s_discard_virtual_memory ==
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) {
    // DiscardVirtualMemory's minimum supported client is Windows 8.1 Update,
    // so skip GetProcAddress("DiscardVirtualMemory") if the Windows version
    // is older than Windows 8.1.
    if (IsWindows8Point1OrGreater()) {
      s_discard_virtual_memory =
          reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
              GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
    } else {
      s_discard_virtual_memory = nullptr;
    }
  }

  void* ptr = reinterpret_cast<void*>(address);
  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = 1;
  if (s_discard_virtual_memory) {
    ret = s_discard_virtual_memory(ptr, length);
  }
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret) {
    PA_CHECK(VirtualAlloc(ptr, length, MEM_RESET, PAGE_READWRITE));
  }
}

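// Editor's note: the lazy binding above -- a sentinel meaning "not yet
// resolved", GetProcAddress() on first use, nullptr cached on failure -- is a
// common pattern for Windows APIs that may be absent at runtime. A generic
// sketch; the API name "SomeOptionalApi" and the signature are assumptions:
#if 0  // Illustrative only; not compiled.
#include <windows.h>

using SomeOptionalFn = BOOL(WINAPI*)(HANDLE);

static BOOL CallOptionalApi(HANDLE h) {
  static SomeOptionalFn fn = reinterpret_cast<SomeOptionalFn>(-1);
  if (fn == reinterpret_cast<SomeOptionalFn>(-1)) {
    fn = reinterpret_cast<SomeOptionalFn>(
        GetProcAddress(GetModuleHandle(L"Kernel32.dll"), "SomeOptionalApi"));
  }
  return fn ? fn(h) : FALSE;  // Degrade gracefully when unavailable.
}
#endif
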
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
@ -0,0 +1,245 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_address_space.h"

#include <array>
#include <cstdint>
#include <ostream>
#include <string>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_IOS)
#include <mach-o/dyld.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

namespace partition_alloc::internal {

#if defined(PA_HAS_64_BITS_POINTERS)

namespace {

#if BUILDFLAG(IS_WIN)
PA_NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}

PA_NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
  PA_NO_CODE_FOLDING();
  PA_CHECK(false);
}
#endif  // BUILDFLAG(IS_WIN)

PA_NOINLINE void HandleGigaCageAllocFailure() {
  PA_NO_CODE_FOLDING();
  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
  // It's important to easily differentiate these two failures on Windows, so
  // crash with different stacks.
#if BUILDFLAG(IS_WIN)
  if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
    // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
    // it must be VA space exhaustion.
    HandleGigaCageAllocFailureOutOfVASpace();
  } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
    // On Windows <8.1, MEM_RESERVE increases commit charge to account for
    // not-yet-committed PTEs needed to cover that VA space, if it was to be
    // committed (see crbug.com/1101421#c16).
    HandleGigaCageAllocFailureOutOfCommitCharge();
  } else
#endif  // BUILDFLAG(IS_WIN)
  {
    PA_CHECK(false);
  }
}

}  // namespace

alignas(kPartitionCachelineSize)
    PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_;

#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if BUILDFLAG(IS_IOS)
namespace {
bool IsIOSTestProcess() {
  // On iOS, only applications with the extended virtual addressing
  // entitlement can use a large address space. Since Earl Grey test runner
  // apps cannot get entitlements, they must use a much smaller pool size.
  uint32_t executable_length = 0;
  _NSGetExecutablePath(NULL, &executable_length);
  PA_DCHECK(executable_length > 0);

  // 'new' cannot be used here, since this function is called during
  // PartitionAddressSpace initialization, at which point 'new' interception
  // is already active. 'malloc' is safe to use, since on Apple platforms,
  // InitializeDefaultAllocatorPartitionRoot() is called before 'malloc'
  // interception is set up.
  char* executable_path = (char*)malloc(executable_length);
  int rv = _NSGetExecutablePath(executable_path, &executable_length);
  PA_DCHECK(!rv);
  size_t executable_path_length =
      std::char_traits<char>::length(executable_path);

  const char kTestProcessSuffix[] = "Runner";
  size_t test_process_suffix_length =
      std::char_traits<char>::length(kTestProcessSuffix);

  if (executable_path_length < test_process_suffix_length)
    return false;

  return !std::char_traits<char>::compare(
      executable_path + (executable_path_length - test_process_suffix_length),
      kTestProcessSuffix, test_process_suffix_length);
}

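// Editor's note: the suffix test above is the classic "compare the last N
// bytes" idiom, written with std::char_traits because allocation is
// constrained at this point. A standalone sketch without that constraint;
// the helper name and strings are illustrative assumptions:
#if 0  // Illustrative only; not compiled.
#include <string>

static bool HasSuffix(const std::string& path, const std::string& suffix) {
  return path.size() >= suffix.size() &&
         path.compare(path.size() - suffix.size(), suffix.size(), suffix) == 0;
}
// HasSuffix("EarlGreyTestRunner", "Runner") == true
// HasSuffix("Chrome", "Runner") == false
#endif
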
}  // namespace

PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
  return IsIOSTestProcess() ? kRegularPoolSizeForIOSTestProcess
                            : kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
  return IsIOSTestProcess() ? kBRPPoolSizeForIOSTestProcess : kBRPPoolSize;
}
#else
PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
  return kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
  return kBRPPoolSize;
}
#endif  // BUILDFLAG(IS_IOS)
#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

void PartitionAddressSpace::Init() {
  if (IsInitialized())
    return;

  size_t regular_pool_size = RegularPoolSize();
  setup_.regular_pool_base_address_ = AllocPages(
      regular_pool_size, regular_pool_size,
      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
  if (!setup_.regular_pool_base_address_)
    HandleGigaCageAllocFailure();
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1) & kMemTagUnmask;
#endif
  PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
  setup_.regular_pool_ = AddressPoolManager::GetInstance().Add(
      setup_.regular_pool_base_address_, regular_pool_size);
  PA_CHECK(setup_.regular_pool_ == kRegularPoolHandle);
  PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
  PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
                            regular_pool_size - 1));
  PA_DCHECK(
      !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));

  size_t brp_pool_size = BRPPoolSize();
  // Reserve an extra allocation granularity unit before the BRP pool, but
  // keep the pool aligned at BRPPoolSize(). A pointer immediately past an
  // allocation is a valid pointer, and having a "forbidden zone" before the
  // BRP pool prevents such a pointer from "sneaking into" the pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
  uintptr_t base_address = AllocPagesWithAlignOffset(
      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
      brp_pool_size - kForbiddenZoneSize,
      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
  if (!base_address)
    HandleGigaCageAllocFailure();
  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1) & kMemTagUnmask;
#endif
  PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
  setup_.brp_pool_ = AddressPoolManager::GetInstance().Add(
      setup_.brp_pool_base_address_, brp_pool_size);
  PA_CHECK(setup_.brp_pool_ == kBRPPoolHandle);
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
  PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
  PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));

#if PA_STARSCAN_USE_CARD_TABLE
  // Reserve memory for the PCScan quarantine card table.
  uintptr_t requested_address = setup_.regular_pool_base_address_;
  uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
      setup_.regular_pool_, requested_address, kSuperPageSize);
  PA_CHECK(requested_address == actual_address)
      << "QuarantineCardTable is required to be allocated at the beginning "
         "of the regular pool";
#endif  // PA_STARSCAN_USE_CARD_TABLE
}

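// Editor's note: a worked example of the AllocPagesWithAlignOffset() call in
// Init() above, with assumed round numbers (an 8 GiB pool and a 64 KiB
// granularity; the real constants differ). Requesting alignment
// `brp_pool_size` with align offset `brp_pool_size - kForbiddenZoneSize`
// places the *reservation* one granularity unit before an aligned boundary,
// so that base_address + kForbiddenZoneSize is exactly pool-aligned:
#if 0  // Illustrative only; not compiled.
constexpr size_t kPool = 8ull << 30;  // Assumed pool size (8 GiB).
constexpr size_t kZone = 64 * 1024;   // Assumed granularity (64 KiB).
// Suppose the allocator returns base_address = N * kPool - kZone.
// Then the pool itself starts at base_address + kZone = N * kPool, which
// satisfies the alignment DCHECK ((pool_base & (kPool - 1)) == 0), while the
// kZone bytes in front stay reserved-but-unusable: the forbidden zone.
#endif
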
void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
                                                 size_t size) {
  // The ConfigurablePool must only be initialized once.
  PA_CHECK(!IsConfigurablePoolInitialized());

  // The other Pools must be initialized first.
  Init();

  PA_CHECK(pool_base);
  PA_CHECK(size <= kConfigurablePoolMaxSize);
  PA_CHECK(size >= kConfigurablePoolMinSize);
  PA_CHECK(base::bits::IsPowerOfTwo(size));
  PA_CHECK(pool_base % size == 0);

  setup_.configurable_pool_base_address_ = pool_base;
  setup_.configurable_pool_base_mask_ = ~(size - 1);

  setup_.configurable_pool_ = AddressPoolManager::GetInstance().Add(
      setup_.configurable_pool_base_address_, size);
  PA_CHECK(setup_.configurable_pool_ == kConfigurablePoolHandle);
}

void PartitionAddressSpace::UninitForTesting() {
  FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
  // For the BRP pool, the allocation region includes a "forbidden zone"
  // before the pool.
  const size_t kForbiddenZoneSize = PageAllocationGranularity();
  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
            BRPPoolSize() + kForbiddenZoneSize);
  // Do not free pages for the configurable pool, because its memory is owned
  // by someone else, but deinitialize it nonetheless.
  setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
  setup_.regular_pool_ = 0;
  setup_.brp_pool_ = 0;
  setup_.configurable_pool_ = 0;
  AddressPoolManager::GetInstance().ResetForTesting();
}

void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
  AddressPoolManager::GetInstance().Remove(setup_.configurable_pool_);
  setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
  setup_.configurable_pool_base_mask_ = 0;
  setup_.configurable_pool_ = 0;
}

#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

PageCharacteristics page_characteristics;

#endif  // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

#endif  // defined(PA_HAS_64_BITS_POINTERS)

}  // namespace partition_alloc::internal
351
src/base/allocator/partition_allocator/partition_address_space.h
Normal file
@ -0,0 +1,351 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

#include <algorithm>
#include <array>
#include <limits>

#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)

namespace partition_alloc {

namespace internal {

// Reserves address space for PartitionAllocator.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
 public:
  // BRP stands for BackupRefPtr. GigaCage is split into pools, one which
  // supports BackupRefPtr and one that doesn't.
  static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() {
    return setup_.regular_pool_;
  }

#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
    return setup_.regular_pool_base_mask_;
  }
#else
  static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
    return kRegularPoolBaseMask;
  }
#endif

  static PA_ALWAYS_INLINE internal::pool_handle GetBRPPool() {
    return setup_.brp_pool_;
  }

  // The Configurable Pool can be created inside an existing mapping and so
  // will be located outside PartitionAlloc's GigaCage.
  static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
    return setup_.configurable_pool_;
  }

  static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
      uintptr_t address) {
    address = ::partition_alloc::internal::UnmaskPtr(address);
    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
    PA_DCHECK(!IsInBRPPool(address));
#endif
    pool_handle pool = 0;
    uintptr_t base = 0;
    if (IsInRegularPool(address)) {
      pool = GetRegularPool();
      base = setup_.regular_pool_base_address_;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    } else if (IsInBRPPool(address)) {
      pool = GetBRPPool();
      base = setup_.brp_pool_base_address_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
    } else if (IsInConfigurablePool(address)) {
      pool = GetConfigurablePool();
      base = setup_.configurable_pool_base_address_;
    } else {
      PA_NOTREACHED();
    }
    return std::make_pair(pool, address - base);
  }
  static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
    return kConfigurablePoolMaxSize;
  }
  static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
    return kConfigurablePoolMinSize;
  }

  // Initialize the GigaCage and the Pools inside of it.
  // This function must only be called from the main thread.
  static void Init();
  // Initialize the ConfigurablePool at the given address |pool_base|. It must
  // be aligned to the size of the pool. The size must be a power of two and
  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
  // This function must only be called from the main thread.
  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
  static void UninitForTesting();
  static void UninitConfigurablePoolForTesting();

  static PA_ALWAYS_INLINE bool IsInitialized() {
    // Either neither or both regular and BRP pool are initialized. The
    // configurable pool is initialized separately.
    if (setup_.regular_pool_) {
      PA_DCHECK(setup_.brp_pool_ != 0);
      return true;
    }

    PA_DCHECK(setup_.brp_pool_ == 0);
    return false;
  }

  static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
    return setup_.configurable_pool_base_address_ !=
           kUninitializedPoolBaseAddress;
  }

  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
    return (address & regular_pool_base_mask) ==
           setup_.regular_pool_base_address_;
  }

  static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
    return setup_.regular_pool_base_address_;
  }

  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
  }
  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
    return (address & setup_.configurable_pool_base_mask_) ==
           setup_.configurable_pool_base_address_;
  }

  static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
    return setup_.configurable_pool_base_address_;
  }

  static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
    PA_DCHECK(IsInBRPPool(address));
    return ::partition_alloc::internal::UnmaskPtr(address) -
           setup_.brp_pool_base_address_;
  }

  // PartitionAddressSpace is a static-only class.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;

 private:
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  static PA_ALWAYS_INLINE size_t RegularPoolSize();
  static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else
  // The pool sizes should be as large as possible.
  constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
    return kRegularPoolSize;
  }
  constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
    return kBRPPoolSize;
  }
#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

  // On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool is
  // where all allocations have a BRP ref-count, thus pointers pointing there
  // can use BRP protection against UaF. Allocations in the other pools don't
  // have that.
  //
  // Pool sizes have to be powers of two. Each pool will be aligned at its
  // own size boundary.
  //
  // NOTE! The BRP pool must be preceded by a reserved region, where
  // allocations are forbidden. This is to prevent a pointer immediately past
  // a non-GigaCage allocation from falling into the BRP pool, thus triggering
  // the BRP mechanism and likely crashing. This "forbidden zone" can be as
  // small as 1B, but it's simpler to just reserve an allocation granularity
  // unit.
  //
  // The ConfigurablePool is an optional Pool that can be created inside an
  // existing mapping by the embedder, and so will be outside of the GigaCage.
  // This Pool can be used when certain PA allocations must be located inside
  // a given virtual address region. One use case for this Pool is V8's
  // virtual memory cage, which requires that ArrayBuffers be located inside
  // of it.
  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
                base::bits::IsPowerOfTwo(kBRPPoolSize));
  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize) &&
                base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));

#if BUILDFLAG(IS_IOS)

#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#error iOS is only supported with a dynamically sized GigaCage.
#endif

  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey
  // tests, since the test process cannot use an extended virtual address
  // space (see crbug.com/1250788).
  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess) &&
                base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif  // BUILDFLAG(IS_IOS)

#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  // Masks used to easily determine whether an address belongs to a pool.
  // On Arm, the top byte of each pointer is ignored (meaning there are
  // effectively 256 versions of each valid pointer). 4 bits are used to
  // store tags for Arm's Memory Tagging Extension (MTE). To ensure that
  // tagged pointers are recognized as being in the pool, mask off the top
  // byte with kMemTagUnmask.
  static constexpr uintptr_t kRegularPoolOffsetMask =
      static_cast<uintptr_t>(kRegularPoolSize) - 1;
  static constexpr uintptr_t kRegularPoolBaseMask =
      ~kRegularPoolOffsetMask & kMemTagUnmask;
  static constexpr uintptr_t kBRPPoolOffsetMask =
      static_cast<uintptr_t>(kBRPPoolSize) - 1;
  static constexpr uintptr_t kBRPPoolBaseMask =
      ~kBRPPoolOffsetMask & kMemTagUnmask;
#endif  // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
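  // Editor's note: a worked example of the base-mask test above, with an
  // assumed 16 GiB pool (kPoolMaxSize is defined elsewhere; the value here is
  // only for illustration). Because the pool base is aligned to the pool
  // size, `(addr & base_mask) == base` is equivalent to
  // `base <= addr < base + size`, and ANDing with kMemTagUnmask first strips
  // any MTE tag bits from the top byte:
  //
  //   size      = 1ull << 34                  (16 GiB, assumed)
  //   base      = 0x0000'4000'0000'0000       (aligned: base % size == 0)
  //   base_mask = ~(size - 1) & kMemTagUnmask
  //   addr = base + 0x1234  ->  (addr & base_mask) == base   (in pool)
  //   addr = base + size    ->  (addr & base_mask) != base   (out of pool)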

  // This must be set to such a value that IsIn*Pool() always returns false
  // when the pool isn't initialized.
  static constexpr uintptr_t kUninitializedPoolBaseAddress =
      static_cast<uintptr_t>(-1);

  struct GigaCageSetup {
    // Before PartitionAddressSpace::Init(), no allocations are made from a
    // reserved address space. Therefore, set *_pool_base_address_ initially
    // to -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
    constexpr GigaCageSetup()
        : regular_pool_base_address_(kUninitializedPoolBaseAddress),
          brp_pool_base_address_(kUninitializedPoolBaseAddress),
          configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
          regular_pool_base_mask_(0),
          brp_pool_base_mask_(0),
#endif
          configurable_pool_base_mask_(0),
          regular_pool_(0),
          brp_pool_(0),
          configurable_pool_(0) {
    }

    // Using a union to enforce padding.
    union {
      struct {
        uintptr_t regular_pool_base_address_;
        uintptr_t brp_pool_base_address_;
        uintptr_t configurable_pool_base_address_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
        uintptr_t regular_pool_base_mask_;
        uintptr_t brp_pool_base_mask_;
#endif
        uintptr_t configurable_pool_base_mask_;

        pool_handle regular_pool_;
        pool_handle brp_pool_;
        pool_handle configurable_pool_;
      };

      char one_cacheline_[kPartitionCachelineSize];
    };
  };
  static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
                "GigaCageSetup has to fill a whole number of cachelines");

  // See the comment describing the address layout above.
  //
  // These are write-once fields, frequently accessed thereafter. Make sure
  // they don't share a cacheline with other, potentially writeable data,
  // through alignment and padding.
  alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
};

PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
    uintptr_t address) {
  return PartitionAddressSpace::GetPoolAndOffset(address);
}

PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
  return std::get<0>(GetPoolAndOffset(address));
}

PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
  return PartitionAddressSpace::OffsetInBRPPool(address);
}

}  // namespace internal

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
  PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
  return internal::PartitionAddressSpace::IsInRegularPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
         || internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
         || internal::PartitionAddressSpace::IsInConfigurablePool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
  return internal::PartitionAddressSpace::IsInRegularPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
  return internal::PartitionAddressSpace::IsInBRPPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
    uintptr_t address) {
  return internal::PartitionAddressSpace::IsInConfigurablePool(address);
}

PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
  return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
}

}  // namespace partition_alloc

#endif  // defined(PA_HAS_64_BITS_POINTERS)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
77
src/base/allocator/partition_allocator/partition_alloc-inl.h
Normal file
@ -0,0 +1,77 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

#include <algorithm>
#include <cstring>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"

// Prefetch *x into memory.
#if defined(__clang__) || defined(COMPILER_GCC)
#define PA_PREFETCH(x) __builtin_prefetch(x)
#else
#define PA_PREFETCH(x)
#endif

namespace partition_alloc::internal {

// This is a `memset` that resists being optimized away. Adapted from
// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
// depend on //third_party, and this is small enough.)
PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
  memset(ptr, value, size);

  // As best as we can tell, this is sufficient to break any optimisations
  // that might try to eliminate "superfluous" memsets. If there's an easy
  // way to detect memset_s, it would be better to use that.
  __asm__ __volatile__("" : : "r"(ptr) : "memory");
}

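// Editor's note: a sketch of why the inline-asm barrier matters. With a
// plain memset() before a buffer dies, dead-store elimination may drop the
// scrub; SecureMemset() forces the compiler to assume the bytes are
// observed. The demo function and key size are assumptions for illustration.
#if 0  // Illustrative only; not compiled.
static void DemoScrubSecret() {
  constexpr size_t kKeySize = 32;
  unsigned char key[kKeySize];
  // ... use key ...
  // memset(key, 0, kKeySize);     // May be elided: `key` is dead afterwards.
  SecureMemset(key, 0, kKeySize);  // Survives optimization.
}
#endif
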

// Used to memset() memory for debugging purposes only.
PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
  // Only set the first 512kiB of the allocation. This is enough to detect
  // uses of uninitialized / freed memory, and makes tests run significantly
  // faster. Note that for direct-mapped allocations, memory is decommitted
  // at free() time, so use of freed memory cannot happen there.
  size_t size_to_memset = std::min(size, size_t{1} << 19);
  memset(ptr, value, size_to_memset);
}

// Returns true if we've hit the end of a random-length period. We don't want
// to invoke `RandomValue` too often, because we call this function in a hot
// spot (`Free`), and `RandomValue` incurs the cost of atomics.
#if !BUILDFLAG(PA_DCHECK_IS_ON)
PA_ALWAYS_INLINE bool RandomPeriod() {
  static thread_local uint8_t counter = 0;
  if (PA_UNLIKELY(counter == 0)) {
    // It's OK to truncate this value.
    counter = static_cast<uint8_t>(::partition_alloc::internal::RandomValue());
  }
  // If `counter` is 0, this will wrap. That is intentional and OK.
  counter--;
  return counter == 0;
}
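
// Editor's note: a sketch of the period produced above. Each reload draws a
// byte R from RandomValue(); the counter then counts down R times (256 times
// when R == 0, thanks to the intentional wrap), so exactly one call per
// period returns true. The harness name is an assumption, for illustration:
#if 0  // Illustrative only; not compiled.
static size_t DemoMeasurePeriod() {
  size_t calls = 0;
  while (!RandomPeriod())  // Returns true once per random-length period.
    ++calls;
  return calls + 1;  // Uniform in [1, 256] across periods.
}
#endif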
#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)

}  // namespace partition_alloc::internal

namespace base::internal {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::SecureMemset;
#if !BUILDFLAG(PA_DCHECK_IS_ON)
using ::partition_alloc::internal::RandomPeriod;
#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)

}  // namespace base::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
122
src/base/allocator/partition_allocator/partition_alloc.cc
Normal file
@ -0,0 +1,122 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <cstdint>
#include <memory>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"

namespace partition_alloc {

void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
  // This is from page_allocator_constants.h and doesn't really fit here, but
  // there isn't a centralized initialization function in page_allocator.cc,
  // so there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::SystemPageSize() & internal::SystemPageOffsetMask()) == 0,
      "SystemPageSize() must be power of 2");

  // Two partition pages are used as guard / metadata page so make sure the
  // super page size is bigger.
  STATIC_ASSERT_OR_PA_CHECK(
      internal::PartitionPageSize() * 4 <= internal::kSuperPageSize,
      "ok super page size");
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::kSuperPageSize & internal::SystemPageOffsetMask()) == 0,
      "ok super page multiple");
  // Four system pages gives us room to hack out a still-guard-paged piece
  // of metadata in the middle of a guard partition page.
  STATIC_ASSERT_OR_PA_CHECK(
      internal::SystemPageSize() * 4 <= internal::PartitionPageSize(),
      "ok partition page size");
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
      "ok partition page multiple");
  static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
                    internal::kPageMetadataSize,
                "PartitionPage should not be too big");
  STATIC_ASSERT_OR_PA_CHECK(
      internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=
          internal::SystemPageSize(),
      "page metadata fits in hole");

  // Limit to prevent callers accidentally overflowing an int size.
  STATIC_ASSERT_OR_PA_CHECK(
      internal::MaxDirectMapped() <=
          (1UL << 31) + internal::DirectMapAllocationGranularity(),
      "maximum direct mapped allocation");

  // Check that some of our zanier calculations worked out as expected.
  static_assert(internal::kSmallestBucket == internal::kAlignment,
                "generic smallest bucket");
  static_assert(internal::kMaxBucketed == 917504, "generic max bucketed");
  STATIC_ASSERT_OR_PA_CHECK(
      internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
      "System pages per slot span must be no greater than 16.");

  PA_DCHECK(on_out_of_memory);
  internal::g_oom_handling_function = on_out_of_memory;
}

void PartitionAllocGlobalUninitForTesting() {
  internal::PCScan::UninitForTesting();  // IN-TEST
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if defined(PA_HAS_64_BITS_POINTERS)
  internal::PartitionAddressSpace::UninitForTesting();
#else
  internal::AddressPoolManager::GetInstance().ResetForTesting();
#endif  // defined(PA_HAS_64_BITS_POINTERS)
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  internal::g_oom_handling_function = nullptr;
}

namespace internal {

template <bool thread_safe>
PartitionAllocator<thread_safe>::~PartitionAllocator() {
  MemoryReclaimer::Instance()->UnregisterPartition(&partition_root_);
}

template <bool thread_safe>
void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
      << "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
  partition_root_.Init(opts);
  MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
}

template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);

#if (BUILDFLAG(PA_DCHECK_IS_ON) ||                 \
     BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
void CheckThatSlotOffsetIsZero(uintptr_t address) {
  // Add kPartitionPastAllocationAdjustment, because
  // PartitionAllocGetSlotStartInBRPPool will subtract it.
  PA_CHECK(PartitionAllocGetSlotStartInBRPPool(
               address + kPartitionPastAllocationAdjustment) == address);
}
#endif

}  // namespace internal

}  // namespace partition_alloc
14
src/base/allocator/partition_allocator/partition_alloc.gni
Normal file
@ -0,0 +1,14 @@
# Copyright (c) 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

declare_args() {
  make_partition_alloc_standalone = false
}

# TODO(): move partition allocator's own args defined by
# //base/allocator/allocator.gni here, e.g. put_ref_count_in_previous_slot
# (this changes PartitionAlloc's behavior).
# On the other hand, partition_alloc_as_malloc is related to allocator_shim.
# Caller sides should init PA-E, e.g. PartitionAllocMallocInitOnce().
56
src/base/allocator/partition_allocator/partition_alloc.h
Normal file
@ -0,0 +1,56 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_root.h"

namespace partition_alloc {

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void PartitionAllocGlobalUninitForTesting();

namespace internal {
template <bool thread_safe>
struct PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocator {
  PartitionAllocator() = default;
  ~PartitionAllocator();

  void init(PartitionOptions);

  PA_ALWAYS_INLINE PartitionRoot<thread_safe>* root() {
    return &partition_root_;
  }
  PA_ALWAYS_INLINE const PartitionRoot<thread_safe>* root() const {
    return &partition_root_;
  }

 private:
  PartitionRoot<thread_safe> partition_root_;
};

}  // namespace internal

using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;

}  // namespace partition_alloc

namespace base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionAllocator;
using ::partition_alloc::PartitionAllocGlobalInit;
using ::partition_alloc::PartitionAllocGlobalUninitForTesting;

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
@ -0,0 +1,69 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This is a low level implementation of atomic semantics for reference
// counting. Please use base/memory/ref_counted.h directly instead.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_

#include <atomic>

namespace partition_alloc::internal::base {

class AtomicRefCount {
 public:
  constexpr AtomicRefCount() : ref_count_(0) {}
  explicit constexpr AtomicRefCount(int initial_value)
      : ref_count_(initial_value) {}

  // Increment a reference count.
  // Returns the previous value of the count.
  int Increment() { return Increment(1); }

  // Increment a reference count by "increment", which must exceed 0.
  // Returns the previous value of the count.
  int Increment(int increment) {
    return ref_count_.fetch_add(increment, std::memory_order_relaxed);
  }

  // Decrement a reference count, and return whether the result is non-zero.
  // Insert barriers to ensure that state written before the reference count
  // became zero will be visible to a thread that has just made the count
  // zero.
  bool Decrement() {
    // TODO(jbroman): Technically this doesn't need to be an acquire operation
    // unless the result is 1 (i.e., the ref count did indeed reach zero).
    // However, there are toolchain issues that make that not work as well at
    // present (notably TSAN doesn't like it).
    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
  }

  // Return whether the reference count is one. If the reference count is
  // used in the conventional way, a reference count of 1 implies that the
  // current thread owns the reference and no other thread shares it. This
  // call performs the test for a reference count of one, and performs the
  // memory barrier needed for the owning thread to act on the object,
  // knowing that it has exclusive access to the object.
  bool IsOne() const {
    return ref_count_.load(std::memory_order_acquire) == 1;
  }

  // Return whether the reference count is zero. With conventional object
  // referencing counting, the object will be destroyed, so the reference
  // count should never be zero. Hence this is generally used for a debug
  // check.
  bool IsZero() const {
    return ref_count_.load(std::memory_order_acquire) == 0;
  }

  // Returns the current reference count (with no barriers). This is subtle,
  // and should be used only for debugging.
  int SubtleRefCountForDebug() const {
    return ref_count_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic_int ref_count_;
};

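// Editor's note: a minimal sketch of conventional use -- the last Decrement()
// owner destroys the object, and the acq_rel ordering in Decrement() makes
// writes from other releasing threads visible to the destroying thread. The
// Foo type is an assumption for illustration.
#if 0  // Illustrative only; not compiled.
struct Foo {
  AtomicRefCount ref{1};  // The creator holds the initial reference.
  void AddRef() { ref.Increment(); }
  void Release() {
    if (!ref.Decrement())  // false => the count reached zero.
      delete this;
  }
};
#endif
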
} // namespace partition_alloc::internal::base
|
||||||
|
|
||||||
|
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
|
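For orientation, here is a minimal sketch of how an intrusively ref-counted object might drive AtomicRefCount, mirroring what base/memory/ref_counted.h does internally; the `SharedThing` type is hypothetical and not part of this import.

// Minimal hypothetical sketch: an intrusively ref-counted object built on
// AtomicRefCount. `SharedThing` is illustrative only.
#include "base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h"

class SharedThing {
 public:
  static SharedThing* Create() { return new SharedThing(); }

  void AddRef() { ref_count_.Increment(); }  // Relaxed; no ordering needed.

  void Release() {
    // Decrement() returns false only on the thread that drops the count to
    // zero; its acq_rel barrier makes all prior writes visible to that
    // thread before the object is destroyed.
    if (!ref_count_.Decrement())
      delete this;
  }

 private:
  SharedThing() = default;
  ~SharedThing() = default;

  partition_alloc::internal::base::AtomicRefCount ref_count_{1};
};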
@ -0,0 +1,48 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_

#include <type_traits>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"

#if !PA_HAS_BUILTIN(__builtin_bit_cast)
#include <string.h>  // memcpy
#endif

namespace partition_alloc::internal::base {

// This is C++20's std::bit_cast<>().
// It morally does what `*reinterpret_cast<Dest*>(&source)` does, but the
// cast/deref pair is undefined behavior, while bit_cast<>() isn't.
template <class Dest, class Source>
#if PA_HAS_BUILTIN(__builtin_bit_cast)
constexpr
#else
inline
#endif
    Dest
    bit_cast(const Source& source) {
#if PA_HAS_BUILTIN(__builtin_bit_cast)
  // TODO(thakis): Keep only this codepath once nacl is gone or updated.
  return __builtin_bit_cast(Dest, source);
#else
  static_assert(sizeof(Dest) == sizeof(Source),
                "bit_cast requires source and destination to be the same size");
  static_assert(std::is_trivially_copyable_v<Dest>,
                "bit_cast requires the destination type to be copyable");
  static_assert(std::is_trivially_copyable_v<Source>,
                "bit_cast requires the source type to be copyable");

  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
#endif
}

}  // namespace partition_alloc::internal::base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BIT_CAST_H_
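A typical use, sketched here hypothetically, is type punning that would otherwise require an undefined reinterpret_cast, e.g. reading the IEEE-754 bit pattern of a float:

// Hypothetical usage sketch: recover the IEEE-754 bit pattern of a float
// without the UB of *reinterpret_cast<uint32_t*>(&f).
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/bit_cast.h"

uint32_t FloatBits(float f) {
  return partition_alloc::internal::base::bit_cast<uint32_t>(f);
}

// FloatBits(1.0f) == 0x3F800000 on IEEE-754 platforms.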
@ -0,0 +1,216 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file defines some bit utilities.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_

#include <climits>
#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"

#if defined(COMPILER_MSVC)
#include <intrin.h>
#endif

namespace partition_alloc::internal::base::bits {

// Returns true iff |value| is a power of 2.
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
constexpr bool IsPowerOfTwo(T value) {
  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
  //
  // Only positive integers with a single bit set are powers of two. If only
  // one bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit
  // set to zero and all bits to its right set to 1 (e.g. 0b00000011111111).
  // Hence |x & (x-1)| is 0 iff x is a power of two.
  return value > 0 && (value & (value - 1)) == 0;
}

// Round down |size| to a multiple of alignment, which must be a power of two.
inline constexpr size_t AlignDown(size_t size, size_t alignment) {
  PA_DCHECK(IsPowerOfTwo(alignment));
  return size & ~(alignment - 1);
}

// Move |ptr| back to the previous multiple of alignment, which must be a power
// of two. Defined for types where sizeof(T) is one byte.
template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
inline T* AlignDown(T* ptr, size_t alignment) {
  return reinterpret_cast<T*>(
      AlignDown(reinterpret_cast<size_t>(ptr), alignment));
}

// Round up |size| to a multiple of alignment, which must be a power of two.
inline constexpr size_t AlignUp(size_t size, size_t alignment) {
  PA_DCHECK(IsPowerOfTwo(alignment));
  return (size + alignment - 1) & ~(alignment - 1);
}

// Advance |ptr| to the next multiple of alignment, which must be a power of
// two. Defined for types where sizeof(T) is one byte.
template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
inline T* AlignUp(T* ptr, size_t alignment) {
  return reinterpret_cast<T*>(
      AlignUp(reinterpret_cast<size_t>(ptr), alignment));
}

// CountLeadingZeroBits(value) returns the number of zero bits following the
// most significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 2
//
// CountTrailingZeroBits(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 1
//
// C does not have an operator to do this, but fortunately the various
// compilers have built-ins that map to fast underlying processor instructions.
//
// Prefer the clang path on Windows, as _BitScanReverse() and friends are not
// constexpr.
#if defined(COMPILER_MSVC) && !defined(__clang__)

template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
                            int>::type
    CountLeadingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  return PA_LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
             ? (31 - index - (32 - bits))
             : bits;
}

template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
                            int>::type
    CountLeadingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  // MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
  return PA_LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
             ? (63 - index)
             : 64;
#else
  uint32_t left = static_cast<uint32_t>(x >> 32);
  if (PA_LIKELY(_BitScanReverse(&index, left)))
    return 31 - index;

  uint32_t right = static_cast<uint32_t>(x);
  if (PA_LIKELY(_BitScanReverse(&index, right)))
    return 63 - index;

  return 64;
#endif
}

template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
                            int>::type
    CountTrailingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  return PA_LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
                                                                      : bits;
}

template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
                            int>::type
    CountTrailingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  // MSVC only supplies _BitScanForward64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
  return PA_LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
                                                                        : 64;
#else
  uint32_t right = static_cast<uint32_t>(x);
  if (PA_LIKELY(_BitScanForward(&index, right)))
    return index;

  uint32_t left = static_cast<uint32_t>(x >> 32);
  if (PA_LIKELY(_BitScanForward(&index, left)))
    return 32 + index;

  return 64;
#endif
}

#elif defined(COMPILER_GCC) || defined(__clang__)

// __builtin_clz has undefined behaviour for an input of 0, even though there's
// clearly a return value that makes sense, and even though some processor clz
// instructions have defined behaviour for 0. We could drop to raw __asm__ to
// do better, but we'll avoid doing that unless we see proof that we need to.
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            int>::type
    CountLeadingZeroBits(T value) {
  static_assert(bits > 0, "invalid instantiation");
  return PA_LIKELY(value)
             ? bits == 64
                   ? __builtin_clzll(static_cast<uint64_t>(value))
                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
             : bits;
}

template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            int>::type
    CountTrailingZeroBits(T value) {
  return PA_LIKELY(value) ? bits == 64
                                ? __builtin_ctzll(static_cast<uint64_t>(value))
                                : __builtin_ctz(static_cast<uint32_t>(value))
                          : bits;
}

#endif

// Returns the integer i such that 2^i <= n < 2^(i+1).
//
// There is a common `BitLength` function, which returns the number of bits
// required to represent a value. Rather than implement that function,
// use `Log2Floor` and add 1 to the result.
constexpr int Log2Floor(uint32_t n) {
  return 31 - CountLeadingZeroBits(n);
}

// Returns the integer i such that 2^(i-1) < n <= 2^i.
constexpr int Log2Ceiling(uint32_t n) {
  // When n == 0, we want the function to return -1.
  // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
  // why the statement below starts with (n ? 32 : -1).
  return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
}

// Returns a value of type T with a single bit set in the left-most position.
// Can be used instead of manually shifting a 1 to the left.
template <typename T>
constexpr T LeftmostBit() {
  static_assert(std::is_integral<T>::value,
                "This function can only be used with integral types.");
  T one(1u);
  return one << ((CHAR_BIT * sizeof(T) - 1));
}

}  // namespace partition_alloc::internal::base::bits

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_
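The helpers above are easiest to see with concrete values; a hypothetical sketch, assuming the constexpr (clang/GCC) code path:

// Hypothetical usage sketch with worked values (clang/GCC constexpr path).
#include <cstddef>

#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"

namespace pa_bits = partition_alloc::internal::base::bits;

static_assert(pa_bits::IsPowerOfTwo(64u), "single bit set");
static_assert(!pa_bits::IsPowerOfTwo(48u), "0b110000 has two bits set");
static_assert(pa_bits::AlignDown(size_t{13}, 16) == 0, "previous multiple");
static_assert(pa_bits::AlignUp(size_t{13}, 16) == 16, "next multiple");
static_assert(pa_bits::Log2Floor(48) == 5, "2^5 <= 48 < 2^6");
static_assert(pa_bits::Log2Ceiling(48) == 6, "2^5 < 48 <= 2^6");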
@ -0,0 +1,103 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc_base/check.h"

#include "build/build_config.h"

// check.h is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 17000
#endif

#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"

namespace partition_alloc::internal::logging {

// TODO(1151236): Make CheckError not allocate memory, so we can use CHECK()
// inside PartitionAllocator when PartitionAllocator-Everywhere is enabled.
// (Also need to modify LogMessage.)
CheckError CheckError::Check(const char* file,
                             int line,
                             const char* condition) {
  CheckError check_error(new LogMessage(file, line, LOGGING_FATAL));
  check_error.stream() << "Check failed: " << condition << ". ";
  return check_error;
}

CheckError CheckError::DCheck(const char* file,
                              int line,
                              const char* condition) {
  CheckError check_error(new LogMessage(file, line, LOGGING_DCHECK));
  check_error.stream() << "Check failed: " << condition << ". ";
  return check_error;
}

CheckError CheckError::PCheck(const char* file,
                              int line,
                              const char* condition) {
  SystemErrorCode err_code = logging::GetLastSystemErrorCode();
#if BUILDFLAG(IS_WIN)
  CheckError check_error(
      new Win32ErrorLogMessage(file, line, LOGGING_FATAL, err_code));
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  CheckError check_error(
      new ErrnoLogMessage(file, line, LOGGING_FATAL, err_code));
#endif
  check_error.stream() << "Check failed: " << condition << ". ";
  return check_error;
}

CheckError CheckError::PCheck(const char* file, int line) {
  return PCheck(file, line, "");
}

CheckError CheckError::DPCheck(const char* file,
                               int line,
                               const char* condition) {
  SystemErrorCode err_code = logging::GetLastSystemErrorCode();
#if BUILDFLAG(IS_WIN)
  CheckError check_error(
      new Win32ErrorLogMessage(file, line, LOGGING_DCHECK, err_code));
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  CheckError check_error(
      new ErrnoLogMessage(file, line, LOGGING_DCHECK, err_code));
#endif
  check_error.stream() << "Check failed: " << condition << ". ";
  return check_error;
}

CheckError CheckError::NotImplemented(const char* file,
                                      int line,
                                      const char* function) {
  CheckError check_error(new LogMessage(file, line, LOGGING_ERROR));
  check_error.stream() << "Not implemented reached in " << function;
  return check_error;
}

std::ostream& CheckError::stream() {
  return log_message_->stream();
}

CheckError::~CheckError() {
  // Note: This function ends up in crash stack traces. If its full name
  // changes, the crash server's magic signature logic needs to be updated.
  // See cl/306632920.
  delete log_message_;
}

CheckError::CheckError(LogMessage* log_message) : log_message_(log_message) {}

void RawCheck(const char* message) {
  RawLog(LOGGING_FATAL, message);
}

void RawError(const char* message) {
  RawLog(LOGGING_ERROR, message);
}

}  // namespace partition_alloc::internal::logging
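The lifecycle above is the core of the design: CheckError is an RAII wrapper whose stream() accumulates the message and whose destructor hands it to the logging backend (fatally, for CHECK). A simplified standalone model of that flow, with purely hypothetical names:

// Simplified, hypothetical model of the CheckError flow: the message is
// assembled through stream() and emitted when the temporary dies at the end
// of the full expression. The real class logs through LogMessage and can
// crash the process.
#include <iostream>
#include <sstream>

class ToyCheckError {
 public:
  explicit ToyCheckError(const char* condition) {
    stream_ << "Check failed: " << condition << ". ";
  }
  ~ToyCheckError() { std::cerr << stream_.str() << '\n'; }

  std::ostream& stream() { return stream_; }

 private:
  std::ostringstream stream_;
};

// ToyCheckError("ptr != nullptr").stream() << "extra detail";
// prints: Check failed: ptr != nullptr. extra detail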
@ -0,0 +1,179 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_

#include <iosfwd>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"

// This header defines the CHECK, DCHECK, and DPCHECK macros.
//
// CHECK dies with a fatal error if its condition is not true. It is not
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode.
//
// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
//
// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f.
// perror(3)).
//
// Additional information can be streamed to these macros and will be included
// in the log output if the condition doesn't hold (you may need to include
// <ostream>):
//
//   CHECK(condition) << "Additional info.";
//
// The condition is evaluated exactly once. Even in build modes where e.g.
// DCHECK is disabled, the condition and any stream arguments are still
// referenced to avoid warnings about unused variables and functions.
//
// For the (D)CHECK_EQ, etc. macros, see base/check_op.h. However, that header
// is *significantly* larger than check.h, so try to avoid including it in
// header files.

namespace partition_alloc::internal::logging {

// Class used to explicitly ignore an ostream, and optionally a boolean value.
class VoidifyStream {
 public:
  VoidifyStream() = default;
  explicit VoidifyStream(bool ignored) {}

  // This operator has lower precedence than << but higher than ?:
  void operator&(std::ostream&) {}
};

// Helper macro which avoids evaluating the arguments to a stream if the
// condition is false.
#define PA_LAZY_CHECK_STREAM(stream, condition) \
  !(condition)                                  \
      ? (void)0                                 \
      : ::partition_alloc::internal::logging::VoidifyStream() & (stream)

// Macro which uses but does not evaluate expr and any stream parameters.
#define PA_EAT_CHECK_STREAM_PARAMS(expr)                              \
  true ? (void)0                                                      \
       : ::partition_alloc::internal::logging::VoidifyStream(expr) & \
             (*::partition_alloc::internal::logging::g_swallow_stream)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern std::ostream* g_swallow_stream;

class LogMessage;

// Class used for raising a check error upon destruction.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CheckError {
 public:
  static CheckError Check(const char* file, int line, const char* condition);

  static CheckError DCheck(const char* file, int line, const char* condition);

  static CheckError PCheck(const char* file, int line, const char* condition);
  static CheckError PCheck(const char* file, int line);

  static CheckError DPCheck(const char* file, int line, const char* condition);

  static CheckError NotImplemented(const char* file,
                                   int line,
                                   const char* function);

  // Stream for adding optional details to the error message.
  std::ostream& stream();

  PA_NOMERGE ~CheckError();

  CheckError(const CheckError& other) = delete;
  CheckError& operator=(const CheckError& other) = delete;
  CheckError(CheckError&& other) = default;
  CheckError& operator=(CheckError&& other) = default;

 private:
  explicit CheckError(LogMessage* log_message);

  LogMessage* log_message_;
};

#if defined(OFFICIAL_BUILD) && defined(NDEBUG)

// Discard log strings to reduce code bloat.
//
// This is not calling BreakDebugger since this is called frequently, and
// calling an out-of-line function instead of a noreturn inline macro prevents
// compiler optimizations.
#define PA_BASE_CHECK(condition)                   \
  PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
                            : PA_EAT_CHECK_STREAM_PARAMS()

// TODO(1151236): base/test/gtest_util.h uses CHECK_WILL_STREAM(). After
// copying (or removing) gtest_util.h and removing gtest_util.h from partition
// allocator's DEPS, rename or remove CHECK_WILL_STREAM().
#define CHECK_WILL_STREAM() false

#define PA_BASE_PCHECK(condition)                                        \
  PA_LAZY_CHECK_STREAM(                                                  \
      ::partition_alloc::internal::logging::CheckError::PCheck(__FILE__, \
                                                               __LINE__) \
          .stream(),                                                     \
      PA_UNLIKELY(!(condition)))

#else

#define PA_BASE_CHECK(condition)                               \
  PA_LAZY_CHECK_STREAM(                                        \
      ::partition_alloc::internal::logging::CheckError::Check( \
          __FILE__, __LINE__, #condition)                      \
          .stream(),                                           \
      !PA_ANALYZER_ASSUME_TRUE(condition))

#define CHECK_WILL_STREAM() true

#define PA_BASE_PCHECK(condition)                               \
  PA_LAZY_CHECK_STREAM(                                         \
      ::partition_alloc::internal::logging::CheckError::PCheck( \
          __FILE__, __LINE__, #condition)                       \
          .stream(),                                            \
      !PA_ANALYZER_ASSUME_TRUE(condition))

#endif

#if BUILDFLAG(PA_DCHECK_IS_ON)

#define PA_BASE_DCHECK(condition)                               \
  PA_LAZY_CHECK_STREAM(                                         \
      ::partition_alloc::internal::logging::CheckError::DCheck( \
          __FILE__, __LINE__, #condition)                       \
          .stream(),                                            \
      !PA_ANALYZER_ASSUME_TRUE(condition))

#define PA_BASE_DPCHECK(condition)                               \
  PA_LAZY_CHECK_STREAM(                                          \
      ::partition_alloc::internal::logging::CheckError::DPCheck( \
          __FILE__, __LINE__, #condition)                        \
          .stream(),                                             \
      !PA_ANALYZER_ASSUME_TRUE(condition))

#else

#define PA_BASE_DCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
#define PA_BASE_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))

#endif

// Async signal safe checking mechanism.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RawCheck(const char* message);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void RawError(const char* message);
#define PA_RAW_CHECK(condition)                       \
  do {                                                \
    if (!(condition))                                 \
      ::partition_alloc::internal::logging::RawCheck( \
          "Check failed: " #condition "\n");          \
  } while (0)

}  // namespace partition_alloc::internal::logging

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CHECK_H_
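PA_LAZY_CHECK_STREAM relies on operator precedence: operator& binds more tightly than ?: but less tightly than <<, so the whole stream chain lands in the false branch of the conditional and its arguments are evaluated only when the check fails. A hypothetical, self-contained sketch of the same pattern:

// Hypothetical sketch of the lazy-stream pattern behind PA_LAZY_CHECK_STREAM.
#include <iostream>

struct Voidify {
  // Lower precedence than <<, higher than ?:, and returns void so that both
  // branches of the conditional expression have the same type.
  void operator&(std::ostream&) {}
};

#define TOY_CHECK(cond) \
  (cond) ? (void)0 : Voidify() & (std::cerr << "Check failed: " #cond ". ")

// TOY_CHECK(2 + 2 == 4);          // passes; stream arguments never evaluated
// TOY_CHECK(1 == 2) << "detail";  // prints "Check failed: 1 == 2. detail"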
@ -0,0 +1,377 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_

#include "build/build_config.h"

#if defined(COMPILER_MSVC) && !defined(__clang__)
#error "Only clang-cl is supported on Windows, see https://crbug.com/988071"
#endif

// This is a wrapper around `__has_cpp_attribute`, which can be used to test
// for the presence of an attribute. In case the compiler does not support
// this macro it will simply evaluate to 0.
//
// References:
// https://wg21.link/sd6#testing-for-the-presence-of-an-attribute-__has_cpp_attribute
// https://wg21.link/cpp.cond#:__has_cpp_attribute
#if defined(__has_cpp_attribute)
#define PA_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#define PA_HAS_CPP_ATTRIBUTE(x) 0
#endif

// A wrapper around `__has_attribute`, similar to PA_HAS_CPP_ATTRIBUTE.
#if defined(__has_attribute)
#define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
#define PA_HAS_ATTRIBUTE(x) 0
#endif

// A wrapper around `__has_builtin`, similar to PA_HAS_CPP_ATTRIBUTE.
#if defined(__has_builtin)
#define PA_HAS_BUILTIN(x) __has_builtin(x)
#else
#define PA_HAS_BUILTIN(x) 0
#endif

// Annotate a function indicating it should not be inlined.
// Use like:
//   PA_NOINLINE void DoStuff() { ... }
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_NOINLINE __attribute__((noinline))
#elif defined(COMPILER_MSVC)
#define PA_NOINLINE __declspec(noinline)
#else
#define PA_NOINLINE
#endif

#if defined(COMPILER_GCC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
#elif defined(COMPILER_MSVC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE __forceinline
#else
#define PA_ALWAYS_INLINE inline
#endif

// Annotate a function indicating it should never be tail called. Useful to
// make sure callers of the annotated function are never omitted from
// call-stacks. To provide the complementary behavior (prevent the annotated
// function from being omitted) look at PA_NOINLINE. Also note that this
// doesn't prevent code folding of multiple identical caller functions into a
// single signature. To prevent code folding, see NO_CODE_FOLDING() in
// base/debug/alias.h.
// Use like:
//   void PA_NOT_TAIL_CALLED FooBar();
#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
#define PA_NOT_TAIL_CALLED __attribute__((not_tail_called))
#else
#define PA_NOT_TAIL_CALLED
#endif

// Specify memory alignment for structs, classes, etc.
// Use like:
//   class PA_ALIGNAS(16) MyClass { ... }
//   PA_ALIGNAS(16) int array[4];
//
// In most places you can use the C++11 keyword "alignas", which is preferred.
//
// But compilers have trouble mixing __attribute__((...)) syntax with
// alignas(...) syntax.
//
// Doesn't work in clang or gcc:
//   struct alignas(16) __attribute__((packed)) S { char c; };
// Works in clang but not gcc:
//   struct __attribute__((packed)) alignas(16) S2 { char c; };
// Works in clang and gcc:
//   struct alignas(16) S3 { char c; } __attribute__((packed));
//
// There are also some attributes that must be specified *before* a class
// definition: visibility (used for exporting functions/classes) is one of
// these attributes. This means that it is not possible to use alignas() with
// a class that is marked as exported.
#if defined(COMPILER_MSVC)
#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
#elif defined(COMPILER_GCC)
#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
#endif

// In case the compiler supports it, PA_NO_UNIQUE_ADDRESS evaluates to the
// C++20 attribute [[no_unique_address]]. This allows annotating data members
// so that they need not have an address distinct from all other non-static
// data members of the class.
//
// References:
// * https://en.cppreference.com/w/cpp/language/attributes/no_unique_address
// * https://wg21.link/dcl.attr.nouniqueaddr
#if PA_HAS_CPP_ATTRIBUTE(no_unique_address)
#define PA_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define PA_NO_UNIQUE_ADDRESS
#endif

// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter.
// For v*printf functions (which take a va_list), pass 0 for dots_param.
// (This is undocumented but matches what the system C headers do.)
// For member functions, the implicit this parameter counts as index 1.
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_PRINTF_FORMAT(format_param, dots_param) \
  __attribute__((format(printf, format_param, dots_param)))
#else
#define PA_PRINTF_FORMAT(format_param, dots_param)
#endif

// PA_WPRINTF_FORMAT is the same, but for wide format strings.
// This doesn't appear to yet be implemented in any compiler.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
#define PA_WPRINTF_FORMAT(format_param, dots_param)
// If available, it would look like:
//   __attribute__((format(wprintf, format_param, dots_param)))

// Sanitizers annotations.
#if PA_HAS_ATTRIBUTE(no_sanitize)
#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
#endif
#if !defined(PA_NO_SANITIZE)
#define PA_NO_SANITIZE(what)
#endif

// MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !BUILDFLAG(IS_NACL)
#include <sanitizer/msan_interface.h>

// Mark a memory region fully initialized.
// Use this to annotate code that deliberately reads uninitialized data, for
// example a GC scavenging root set pointers from the stack.
#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)

// Check a memory region for initializedness, as if it was being used here.
// If any bits are uninitialized, crash with an MSan report.
// Use this to sanitize data which MSan won't be able to track, e.g. before
// passing data to another process via shared memory.
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
  __msan_check_mem_is_initialized(p, size)
#else  // MEMORY_SANITIZER
#define PA_MSAN_UNPOISON(p, size)
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
#endif  // MEMORY_SANITIZER

// Macro useful for writing cross-platform function pointers.
#if !defined(PA_CDECL)
#if BUILDFLAG(IS_WIN)
#define PA_CDECL __cdecl
#else  // BUILDFLAG(IS_WIN)
#define PA_CDECL
#endif  // BUILDFLAG(IS_WIN)
#endif  // !defined(PA_CDECL)

// Macro for hinting that an expression is likely to be false.
#if !defined(PA_UNLIKELY)
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define PA_UNLIKELY(x) (x)
#endif  // defined(COMPILER_GCC)
#endif  // !defined(PA_UNLIKELY)

#if !defined(PA_LIKELY)
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define PA_LIKELY(x) (x)
#endif  // defined(COMPILER_GCC)
#endif  // !defined(PA_LIKELY)

// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#if defined(__has_feature)
#define PA_HAS_FEATURE(FEATURE) __has_feature(FEATURE)
#else
#define PA_HAS_FEATURE(FEATURE) 0
#endif

#if defined(COMPILER_GCC)
#define PA_PRETTY_FUNCTION __PRETTY_FUNCTION__
#elif defined(COMPILER_MSVC)
#define PA_PRETTY_FUNCTION __FUNCSIG__
#else
// See https://en.cppreference.com/w/c/language/function_definition#func
#define PA_PRETTY_FUNCTION __func__
#endif

#if !defined(PA_CPU_ARM_NEON)
#if defined(__arm__)
#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
    !defined(__VFP_FP__) && !defined(_WIN32_WCE) && !defined(ANDROID)
#error Chromium does not support middle endian architecture
#endif
#if defined(__ARM_NEON__)
#define PA_CPU_ARM_NEON 1
#endif
#endif  // defined(__arm__)
#endif  // !defined(PA_CPU_ARM_NEON)

#if !defined(PA_HAVE_MIPS_MSA_INTRINSICS)
#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
#define PA_HAVE_MIPS_MSA_INTRINSICS 1
#endif
#endif

#if defined(__clang__) && PA_HAS_ATTRIBUTE(uninitialized)
// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
// the specified variable.
// The library-wide alternative is
// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in the
// .gn file.
//
// See "init_stack_vars" in build/config/compiler/BUILD.gn and
// http://crbug.com/977230
// "init_stack_vars" is enabled for non-official builds, and we hope to enable
// it in official builds in 2020 as well. The flag writes a fixed pattern into
// the uninitialized parts of all local variables. In rare cases such
// initialization is undesirable, and the attribute can be used:
// 1. Degraded performance.
// In most cases the compiler is able to remove the additional stores, e.g. if
// the memory is never accessed or is properly initialized later, and the
// preserved stores mostly will not affect program performance. However, if
// the compiler fails to do so on some performance-critical code, we can get a
// visible regression in a benchmark.
// 2. memset and memcpy calls.
// The compiler may replace some memory writes with memset or memcpy calls.
// This is not specific to -ftrivial-auto-var-init, but it is more likely to
// happen with the flag. It can be a problem if the code is not linked with
// the C run-time library.
//
// Note: The flag is a security risk mitigation feature, so uses of the
// attribute should be avoided when possible in the future. However, to enable
// this mitigation on most of the code, we need to be less strict now and
// minimize the number of exceptions later. So if in doubt, feel free to use
// the attribute, but please document the problem for whoever is going to
// clean it up later, e.g. the platform, bot, benchmark, or test name in the
// patch description or next to the attribute.
#define PA_STACK_UNINITIALIZED __attribute__((uninitialized))
#else
#define PA_STACK_UNINITIALIZED
#endif

// Attribute "no_stack_protector" disables -fstack-protector for the specified
// function.
//
// "stack_protector" is enabled on most POSIX builds. The flag adds a canary
// to each stack frame, which on function return is checked against a
// reference canary. If the canaries do not match, it's likely that a stack
// buffer overflow has occurred, so immediately crashing will prevent
// exploitation in many cases.
//
// In some cases it's desirable to remove this, e.g. on hot functions, or if
// we have purposely changed the reference canary.
#if defined(COMPILER_GCC) || defined(__clang__)
#if PA_HAS_ATTRIBUTE(__no_stack_protector__)
#define PA_NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
#else
#define PA_NO_STACK_PROTECTOR \
  __attribute__((__optimize__("-fno-stack-protector")))
#endif
#else
#define PA_NO_STACK_PROTECTOR
#endif

// The PA_ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
// to Clang which control what code paths are statically analyzed,
// and is meant to be used in conjunction with assert & assert-like functions.
// The expression is passed straight through if analysis isn't enabled.
//
// PA_ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
// codepath and any other branching codepaths that might follow.
#if defined(__clang_analyzer__)

namespace partition_alloc::internal {

inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
  return false;
}

inline constexpr bool AnalyzerAssumeTrue(bool arg) {
  // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is
  // false.
  return arg || AnalyzerNoReturn();
}

}  // namespace partition_alloc::internal

#define PA_ANALYZER_ASSUME_TRUE(arg) \
  ::partition_alloc::internal::AnalyzerAssumeTrue(!!(arg))
#define PA_ANALYZER_SKIP_THIS_PATH() \
  static_cast<void>(::partition_alloc::internal::AnalyzerNoReturn())

#else  // !defined(__clang_analyzer__)

#define PA_ANALYZER_ASSUME_TRUE(arg) (arg)
#define PA_ANALYZER_SKIP_THIS_PATH()

#endif  // defined(__clang_analyzer__)

// Use the nomerge attribute to disable the optimization that merges multiple
// identical calls.
#if defined(__clang__) && PA_HAS_ATTRIBUTE(nomerge)
#define PA_NOMERGE [[clang::nomerge]]
#else
#define PA_NOMERGE
#endif

// Marks a type as being eligible for the "trivial" ABI despite having a
// non-trivial destructor or copy/move constructor. Such types can be relocated
// after construction by simply copying their memory, which makes them eligible
// to be passed in registers. The canonical example is std::unique_ptr.
//
// Use with caution; this has some subtle effects on constructor/destructor
// ordering and will be very incorrect if the type relies on its address
// remaining constant. When used as a function argument (by value), the value
// may be constructed in the caller's stack frame, passed in a register, and
// then used and destructed in the callee's stack frame. A similar thing can
// occur when values are returned.
//
// PA_TRIVIAL_ABI is not needed for types which have a trivial destructor and
// copy/move constructors, such as base::TimeTicks and other POD.
//
// It is also not likely to be effective on types too large to be passed in one
// or two registers on typical target ABIs.
//
// See also:
//   https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
//   https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
#if defined(__clang__) && PA_HAS_ATTRIBUTE(trivial_abi)
#define PA_TRIVIAL_ABI [[clang::trivial_abi]]
#else
#define PA_TRIVIAL_ABI
#endif

// Marks a member function as reinitializing a moved-from variable.
// See also
// https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
#if defined(__clang__) && PA_HAS_ATTRIBUTE(reinitializes)
#define PA_REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
#else
#define PA_REINITIALIZES_AFTER_MOVE
#endif

// Requires constant initialization. See constinit in C++20. Allows relying on
// a variable being initialized before execution, without requiring a global
// constructor.
#if PA_HAS_ATTRIBUTE(require_constant_initialization)
#define PA_CONSTINIT __attribute__((require_constant_initialization))
#endif
#if !defined(PA_CONSTINIT)
#define PA_CONSTINIT
#endif

#if defined(__clang__)
#define PA_GSL_OWNER [[gsl::Owner]]
#define PA_GSL_POINTER [[gsl::Pointer]]
#else
#define PA_GSL_OWNER
#define PA_GSL_POINTER
#endif

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_COMPILER_SPECIFIC_H_
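To close, a short hypothetical sketch of how several of these annotations compose in ordinary code; all names below are illustrative:

// Hypothetical usage sketch composing a few of the macros defined above.
#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"

// Hot-path helper: force inlining and hint the unlikely branch.
PA_ALWAYS_INLINE bool IsAligned(uintptr_t address, size_t alignment) {
  if (PA_UNLIKELY(alignment == 0))
    return false;
  return (address & (alignment - 1)) == 0;
}

struct Policy {};  // An empty policy type.

// A 16-byte-aligned slot whose empty policy member occupies no storage where
// [[no_unique_address]] is available.
struct PA_ALIGNAS(16) Slot {
  PA_NO_UNIQUE_ADDRESS Policy policy;
  char bytes[16];
};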