Import chromium-90.0.4430.85

This commit is contained in:
klzgrad 2021-05-21 00:50:53 +08:00
commit c4a0edfca5
11115 changed files with 2891116 additions and 0 deletions

39
src/.clang-format Normal file
View File

@ -0,0 +1,39 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11
# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"
# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']

56
src/.gitattributes vendored Normal file
View File

@ -0,0 +1,56 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf
# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf
# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ENG_REVIEW_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf
# Skip Tricium by default on files in third_party.
third_party/** -tricium

207
src/.gn Normal file
View File

@ -0,0 +1,207 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.
import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
# TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen" prints
# a warning that "Build argument has no effect". When adding a v8 variable, it
# also needs to be defined to src/ios/BUILD.gn (respectively removed from both
# location when it is removed).
v8_extra_library_files = []
v8_experimental_extra_library_files = []
v8_enable_gdbjit = false
v8_imminent_deprecation_warnings = false
# TODO(jochen): Remove this. http://crbug.com/v8/5830,
# http://crbug.com/728583.
v8_check_microtasks_scopes_consistency = false
# Don't include webrtc's builtin task queue implementation.
rtc_link_task_queue_impl = false
# Don't include the iLBC audio codec.
# TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
# deps on codecs, we can remove this.
rtc_include_ilbc = false
# Changes some setup for the Crashpad build to set them to build against
# Chromium's zlib, base, etc.
crashpad_dependencies = "chromium"
# Override ANGLE's Vulkan dependencies.
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
angle_vulkan_validation_layers_dir =
"//third_party/vulkan-deps/vulkan-validation-layers/src"
}
# These are the targets to skip header checking by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will not have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
# //chrome/*, https://crbug.com/949535
"//chrome/browser/media/router/discovery:*", # 7 errors
"//chrome/browser/media/router:*", # 225 errors
"//chrome/browser/safe_browsing/android:*", # 3 errors
"//chrome/browser/safety_check/android:*", # 3 errors
"//chrome/browser/storage_access_api:*", # 2 errors
"//chrome/browser/touch_to_fill/android:*", # 8 errors
"//chrome/test:*", # 2682 errors
"//extensions/browser/api/declarative_net_request:*", # 18 errors
"//extensions/browser/api/declarative_webrequest:*", # 29 errors
"//extensions/browser/api/web_request:*", # 37 errors
"//extensions/browser/api:*", # 7 errors
"//extensions/browser:*", # 20 errors
"//extensions:*", # 75 errors
"//headless:*", # 167 errors
"//jingle:*", # 4 errors
"//native_client/src/trusted/service_runtime:*", # 2 errors
"//ppapi/cpp/private:*", # 1 error
"//ppapi/host:*", # 1 error
"//ppapi/native_client/src/untrusted/pnacl_irt_shim:*", # 197 errors
"//ppapi/proxy:*", # 31 errors
"//ppapi/thunk:*", # 1071 errors
"//ppapi:*", # 3 errors
"//remoting/codec:*", # 32 errors
"//remoting/host/file_transfer:*", # 43 errors
"//remoting/host/installer/mac:*", # 1 error
"//remoting/host/it2me:*", # 18 errors
"//remoting/host/mac:*", # 49 errors
"//remoting/host/security_key:*", # 68 errors
"//remoting/host/setup:*", # 9 errors
"//remoting/host/win:*", # 43 errors
"//remoting/host:*", # 164 errors
"//remoting/ios/app/settings:*", # 6 errors
"//remoting/ios/app:*", # 9 errors
"//remoting/ios/audio:*", # 5 errors
"//remoting/ios/domain:*", # 2 errors
"//remoting/ios/facade:*", # 8 errors
"//remoting/ios/persistence:*", # 10 errors
"//remoting/ios/session:*", # 7 errors
"//remoting/ios:*", # 2 errors
"//remoting/protocol:*", # 142 errors
"//remoting/test:*", # 20 errors
"//sandbox/win:*", # 7 errors
"//third_party/ffmpeg:*", # 1 error
"//third_party/icu/*",
"//third_party/libvpx:*", # 164 errors
"//third_party/libwebp:*", # 80 errors, https://crbug.com/800762
# //v8/*, https://crbug.com/v8/7330
"//v8/src/inspector:*", # 20 errors
"//v8/test/cctest:*", # 26 errors
"//v8/test/unittests:*", # 11 errors
"//v8/test/wasm-api-tests:*", # 13 errors
"//v8/third_party/inspector_protocol:*", # 2 errors
"//v8/tools/debug_helper:*", # 9 errors
"//v8/tools/v8windbg:*", # 2 errors
"//v8:*", # 1871 errors
]
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
# enable a different mode. Instead, use GN build args to enable or disable
# functionality and set options. An example is checking for a file in the
# src-internal repo to see if the corresponding src-internal feature should
# be enabled. There are several things that can go wrong with this:
#
# - It's mysterious what causes some things to happen. Although in many cases
# such behavior can be conveniently automatic, GN optimizes for explicit
# and obvious behavior so people can more easily diagnose problems.
#
# - The user can't enable a mode for one build and not another. With GN build
# args, the user can choose the exact configuration of multiple builds
# using one checkout. But implicitly basing flags on the state of the
# checkout, this functionality is broken.
#
# - It's easy to get stale files. If for example the user edits the gclient
# to stop checking out src-internal (or any other optional thing), it's
# easy to end up with stale files still mysteriously triggering build
# conditions that are no longer appropriate (yes, this happens in real
# life).
#
# - Do not use a script to iterate files in a directory (glob):
#
# - This has the same "stale file" problem as the above discussion. Various
# operations can leave untracked files in the source tree which can cause
# surprising effects.
#
# - It becomes impossible to use "git grep" to find where a certain file is
# referenced. This operation is very common and people really do get
# confused when things aren't listed.
#
# - It's easy to screw up. One common case is a build-time script that packs
# up a directory. The author notices that the script isn't re-run when the
# directory is updated, so adds a glob so all the files are listed as
# inputs. This seems to work great... until a file is deleted. When a
# file is deleted, all the inputs the glob lists will still be up to date
# and no command-lines will have been changed. The action will not be
# re-run and the build will be broken. It is possible to get this correct
# using glob, and it's possible to mess it up without glob, but globs make
# this situation much easier to create. If the build always lists the
# files and passes them to a script, it will always be correct.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist +
angle_dotfile_settings.exec_script_whitelist +
[
# Whitelist entries for //build should go into
# //build/dotfile_settings.gni instead, so that they can be shared
# with other repos. The entries in this list should be only for files
# in the Chromium repo outside of //build.
"//build_overrides/build.gni",
"//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
# TODO(dgn): Layer violation but breaks the build otherwise, see
# https://crbug.com/474506.
"//clank/java/BUILD.gn",
"//clank/native/BUILD.gn",
"//google_apis/BUILD.gn",
"//printing/BUILD.gn",
"//remoting/host/installer/linux/BUILD.gn",
"//remoting/remoting_version.gni",
"//remoting/host/installer/win/generate_clsids.gni",
"//tools/grit/grit_rule.gni",
"//tools/gritsettings/BUILD.gn",
]

1257
src/AUTHORS Normal file

File diff suppressed because it is too large Load Diff

1442
src/BUILD.gn Normal file

File diff suppressed because it is too large Load Diff

4384
src/DEPS Normal file

File diff suppressed because it is too large Load Diff

27
src/LICENSE Normal file
View File

@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4293
src/base/BUILD.gn Normal file

File diff suppressed because it is too large Load Diff

22
src/base/DEPS Normal file
View File

@ -0,0 +1,22 @@
include_rules = [
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
"+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",
"+third_party/perfetto/include",
"+third_party/perfetto/protos/perfetto",
"+third_party/tcmalloc",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",
"-url",
# ICU dependencies must be separate from the rest of base.
"-i18n",
# //base/util can use //base but not vice versa.
"-util",
]

3
src/base/DIR_METADATA Normal file
View File

@ -0,0 +1,3 @@
monorail {
component: "Internals>Core"
}

44
src/base/OWNERS Normal file
View File

@ -0,0 +1,44 @@
# See //base/README.md to find qualification for being an owner.
set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
ajwong@chromium.org
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
jdoerrie@chromium.org
kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
# per-file rules:
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*
# For Android-specific changes:
per-file *android*=file://base/android/OWNERS
per-file BUILD.gn=file://base/android/OWNERS
# For Fuchsia-specific changes:
per-file *_fuchsia*=file://build/fuchsia/OWNERS
# For Windows-specific changes:
per-file *_win*=file://base/win/OWNERS
per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS
per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS
# For TCMalloc tests:
per-file security_unittest.cc=jln@chromium.org

140
src/base/PRESUBMIT.py Normal file
View File

@ -0,0 +1,140 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/base.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def _CheckNoInterfacesInBase(input_api, output_api):
"""Checks to make sure no files in libbase.a have |@interface|."""
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('base/') and
not "/ios/" in f.LocalPath() and
not "/test/" in f.LocalPath() and
not f.LocalPath().endswith('.java') and
not f.LocalPath().endswith('_unittest.mm') and
not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Objective-C interfaces or categories are forbidden in libbase. ' +
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
'browse_thread/thread/efb28c10435987fd',
files) ]
return []
def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
"""Returns locations matching one of the search_regexes."""
def FilterFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
files_to_check=files_to_check,
files_to_skip=files_to_skip)
no_presubmit = r"// no-presubmit-check"
locations = []
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
for search_regex in search_regexes:
if (input_api.re.search(search_regex, line) and
not input_api.re.search(no_presubmit, line)):
locations.append(" %s:%d" % (f.LocalPath(), line_num))
break
return locations
def _CheckNoTraceEventInclude(input_api, output_api):
  """Verify that //base includes base_tracing.h instead of trace event headers.

  Checks that files outside trace event implementation include the
  base_tracing.h header instead of specific trace event implementation headers
  to maintain compatibility with the gn flag "enable_base_tracing = false".
  """
  bad_include_patterns = [
      r'^#include "base/trace_event/(?!base_tracing\.h)',
      r'^#include "third_party/perfetto/include/',
  ]
  scanned_files = [
      r".*\.(h|cc|mm)$",
  ]
  exempt_files = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
  ]
  offending = _FindLocations(input_api, bad_include_patterns, scanned_files,
                             exempt_files)
  if not offending:
    return []
  message = (
      'Base code should include "base/trace_event/base_tracing.h" instead\n' +
      'of trace_event implementation headers. If you need to include an\n' +
      'implementation header, verify that base_unittests still passes\n' +
      'with gn arg "enable_base_tracing = false" and add\n' +
      '"// no-presubmit-check" after the include. \n' +
      '\n'.join(offending))
  return [output_api.PresubmitError(message)]
def _WarnPbzeroIncludes(input_api, output_api):
  """Warn to check enable_base_tracing=false when including a pbzero header.

  Emits a warning when including a perfetto pbzero header, encouraging the
  user to verify that //base still builds with enable_base_tracing=false.
  """
  pbzero_include_patterns = [
      r'^#include "third_party/perfetto/protos/',
      r'^#include "base/tracing/protos/',
  ]
  scanned_files = [
      r".*\.(h|cc|mm)$",
  ]
  exempt_files = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
  ]
  flagged = _FindLocations(input_api, pbzero_include_patterns, scanned_files,
                           exempt_files)
  if not flagged:
    return []
  message = (
      'Please verify that base_unittests still builds & passes with gn\n' +
      'arg "enable_base_tracing = false" when adding typed trace events\n' +
      'to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)" to\n' +
      'exclude pbzero headers and anything not supported by\n' +
      '//base/trace_event/trace_event_stub.h.\n' +
      '\n'.join(flagged))
  return [output_api.PresubmitPromptWarning(message)]
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # Run every shared check and concatenate their findings.
  checks = (
      _CheckNoInterfacesInBase,
      _CheckNoTraceEventInclude,
      _WarnPbzeroIncludes,
  )
  results = []
  for check in checks:
    results += check(input_api, output_api)
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked when a change is uploaded for review."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked when a change is committed."""
  return list(_CommonChecks(input_api, output_api))

81
src/base/README.md Normal file
View File

@ -0,0 +1,81 @@
# What is this
Contains a written down set of principles and other information on //base.
Please add to it!
## About //base:
Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.
The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.
Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.
If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.
base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.
## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex c++ stuff
* inclination to always ask why and understand everything (including external
interactions like win32) rather than just hoping the author did it right
* mentorship/experience
* demonstrated good judgement (esp with regards to public APIs) over a length
of time
Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.
## Design and naming
* Be sure to use the base namespace.
* STL-like constructs should adhere as closely to STL as possible. Functions
and behaviors not present in STL should only be added when they are related
to the specific data structure implemented by the container.
* For STL-like constructs our policy is that they should use STL-like naming
even when it may conflict with the style guide. So functions and class names
should be lower case with underscores. Non-STL-like classes and functions
should use Google naming.
## Performance testing
Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:
* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
queue in isolation.
* ConditionVariablePerfTest: Measures thread switching cost of condition
variables.
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
machinery.
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
* MessageLoopPerfTest: Measures the speed of task posting in various
configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
pthreads.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
multithreaded scenarios.
Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes or 3) changes in //base code
itself.

14
src/base/SECURITY_OWNERS Normal file
View File

@ -0,0 +1,14 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base, OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
palmer@chromium.org
rsesek@chromium.org
tsepez@chromium.org

328
src/base/allocator/BUILD.gn Normal file
View File

@ -0,0 +1,328 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//base/allocator/allocator.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
declare_args() {
# Provide a way to force disable debugallocation in Debug builds,
# e.g. for profiling (it's more rare to profile Debug builds,
# but people sometimes need to do that).
enable_debugallocation = is_debug
# Provide a way to build tcmalloc with a low memory footprint.
use_tcmalloc_small_but_slow = false
}
# This "allocator" meta-target will forward to the default allocator according
# to the build settings.
group("allocator") {
public_deps = []
deps = []
if (use_allocator == "tcmalloc") {
deps += [ ":tcmalloc" ]
}
}
config("tcmalloc_flags") {
defines = [
"TCMALLOC_USE_DOUBLYLINKED_FREELIST",
"TCMALLOC_DISABLE_HUGE_ALLOCATIONS",
]
if (enable_debugallocation) {
defines += [
# Use debugallocation for Debug builds to catch problems early
# and cleanly, http://crbug.com/30715 .
"TCMALLOC_FOR_DEBUGALLOCATION",
]
}
if (use_allocator_shim) {
defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
}
if (use_tcmalloc_small_but_slow) {
defines += [ "TCMALLOC_SMALL_BUT_SLOW" ]
}
if (is_clang) {
cflags = [
# tcmalloc initializes some fields in the wrong order.
"-Wno-reorder",
# tcmalloc contains some unused local template specializations.
"-Wno-unused-function",
# tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
"-Wno-unused-local-typedefs",
# for magic2_ in debugallocation.cc (only built in Debug builds).
"-Wno-unused-private-field",
]
} else {
cflags = []
}
if (is_linux || is_chromeos || is_android) {
# We enable all warnings by default, but upstream disables a few.
# Keep "-Wno-*" flags in sync with upstream by comparing against:
# http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
cflags += [
"-Wno-sign-compare",
"-Wno-unused-result",
]
}
}
if (use_allocator == "tcmalloc") {
# tcmalloc currently won't compile on Android.
source_set("tcmalloc") {
tcmalloc_dir = "//third_party/tcmalloc/chromium"
# Don't check tcmalloc's includes. These files include various files like
# base/foo.h and they actually refer to tcmalloc's forked copy of base
# rather than the regular one, which confuses the header checker.
check_includes = false
sources = [
# Generated for our configuration from tcmalloc's build
# and checked in.
"$tcmalloc_dir/src/config.h",
# tcmalloc native and forked files.
"$tcmalloc_dir/src/base/abort.cc",
"$tcmalloc_dir/src/base/abort.h",
"$tcmalloc_dir/src/base/arm_instruction_set_select.h",
"$tcmalloc_dir/src/base/atomicops-internals-arm-generic.h",
"$tcmalloc_dir/src/base/atomicops-internals-arm-v6plus.h",
"$tcmalloc_dir/src/base/atomicops-internals-linuxppc.h",
"$tcmalloc_dir/src/base/atomicops-internals-macosx.h",
"$tcmalloc_dir/src/base/atomicops-internals-windows.h",
"$tcmalloc_dir/src/base/atomicops-internals-x86.cc",
"$tcmalloc_dir/src/base/atomicops-internals-x86.h",
"$tcmalloc_dir/src/base/atomicops.h",
"$tcmalloc_dir/src/base/commandlineflags.h",
# We don't list dynamic_annotations.c since its copy is already
# present in the dynamic_annotations target.
"$tcmalloc_dir/src/base/elf_mem_image.cc",
"$tcmalloc_dir/src/base/elf_mem_image.h",
"$tcmalloc_dir/src/base/linuxthreads.cc",
"$tcmalloc_dir/src/base/linuxthreads.h",
"$tcmalloc_dir/src/base/logging.cc",
"$tcmalloc_dir/src/base/logging.h",
"$tcmalloc_dir/src/base/low_level_alloc.cc",
"$tcmalloc_dir/src/base/low_level_alloc.h",
"$tcmalloc_dir/src/base/spinlock.cc",
"$tcmalloc_dir/src/base/spinlock.h",
"$tcmalloc_dir/src/base/spinlock_internal.cc",
"$tcmalloc_dir/src/base/spinlock_internal.h",
"$tcmalloc_dir/src/base/sysinfo.cc",
"$tcmalloc_dir/src/base/sysinfo.h",
"$tcmalloc_dir/src/base/vdso_support.cc",
"$tcmalloc_dir/src/base/vdso_support.h",
"$tcmalloc_dir/src/central_freelist.cc",
"$tcmalloc_dir/src/central_freelist.h",
"$tcmalloc_dir/src/common.cc",
"$tcmalloc_dir/src/common.h",
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/debugallocation.cc",
"$tcmalloc_dir/src/fake_stacktrace_scope.cc",
"$tcmalloc_dir/src/free_list.cc",
"$tcmalloc_dir/src/free_list.h",
"$tcmalloc_dir/src/gperftools/heap-profiler.h",
"$tcmalloc_dir/src/gperftools/malloc_extension.h",
"$tcmalloc_dir/src/gperftools/malloc_hook.h",
"$tcmalloc_dir/src/gperftools/stacktrace.h",
"$tcmalloc_dir/src/internal_logging.cc",
"$tcmalloc_dir/src/internal_logging.h",
"$tcmalloc_dir/src/linked_list.h",
"$tcmalloc_dir/src/malloc_extension.cc",
"$tcmalloc_dir/src/malloc_hook-inl.h",
"$tcmalloc_dir/src/malloc_hook.cc",
"$tcmalloc_dir/src/maybe_emergency_malloc.h",
"$tcmalloc_dir/src/maybe_threads.cc",
"$tcmalloc_dir/src/maybe_threads.h",
"$tcmalloc_dir/src/page_heap.cc",
"$tcmalloc_dir/src/page_heap.h",
"$tcmalloc_dir/src/raw_printer.cc",
"$tcmalloc_dir/src/raw_printer.h",
"$tcmalloc_dir/src/sampler.cc",
"$tcmalloc_dir/src/sampler.h",
"$tcmalloc_dir/src/span.cc",
"$tcmalloc_dir/src/span.h",
"$tcmalloc_dir/src/stack_trace_table.cc",
"$tcmalloc_dir/src/stack_trace_table.h",
"$tcmalloc_dir/src/stacktrace.cc",
"$tcmalloc_dir/src/static_vars.cc",
"$tcmalloc_dir/src/static_vars.h",
"$tcmalloc_dir/src/symbolize.cc",
"$tcmalloc_dir/src/symbolize.h",
"$tcmalloc_dir/src/system-alloc.cc",
"$tcmalloc_dir/src/system-alloc.h",
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/tcmalloc.cc",
#"$tcmalloc_dir/src/tcmalloc.h",
"$tcmalloc_dir/src/thread_cache.cc",
"$tcmalloc_dir/src/thread_cache.h",
"$tcmalloc_dir/src/windows/port.cc",
"$tcmalloc_dir/src/windows/port.h",
"debugallocation_shim.cc",
# These are both #included by allocator_shim for maximal linking.
#"generic_allocators.cc",
#"win_allocator.cc",
]
if (is_android) {
sources += [ "$tcmalloc_dir/src/config_android.h" ]
}
if (is_linux || is_chromeos) {
sources += [ "$tcmalloc_dir/src/config_linux.h" ]
}
if (is_win) {
sources += [ "$tcmalloc_dir/src/config_win.h" ]
}
# Not included on mips64el.
if (current_cpu == "mips64el") {
sources -= [
"$tcmalloc_dir/src/base/linuxthreads.cc",
"$tcmalloc_dir/src/base/linuxthreads.h",
]
}
# Disable the heap checker in tcmalloc.
defines = [ "NO_HEAP_CHECK" ]
include_dirs = [
".",
"$tcmalloc_dir/src/base",
"$tcmalloc_dir/src",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [
"//build/config/compiler:no_chromium_code",
":tcmalloc_flags",
]
# Thumb mode disabled due to bug in clang integrated assembler
# TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
configs += [ "//build/config/compiler:compiler_arm" ]
# TODO(crbug.com/633719) Make tcmalloc work with AFDO on GCC if possible.
if (!is_clang) {
configs -= [ "//build/config/compiler:afdo" ]
}
deps = [
":buildflags",
"//build:chromeos_buildflags",
]
if (enable_profiling) {
sources += [
"$tcmalloc_dir/src/base/thread_lister.c",
"$tcmalloc_dir/src/base/thread_lister.h",
"$tcmalloc_dir/src/heap-profile-table.cc",
"$tcmalloc_dir/src/heap-profile-table.h",
"$tcmalloc_dir/src/heap-profiler.cc",
"$tcmalloc_dir/src/memory_region_map.cc",
"$tcmalloc_dir/src/memory_region_map.h",
"$tcmalloc_dir/src/profile-handler.cc",
"$tcmalloc_dir/src/profile-handler.h",
"$tcmalloc_dir/src/profiledata.cc",
"$tcmalloc_dir/src/profiledata.h",
"$tcmalloc_dir/src/profiler.cc",
]
defines += [ "ENABLE_PROFILING=1" ]
}
if (is_linux || is_chromeos || is_android) {
sources -= [
"$tcmalloc_dir/src/system-alloc.h",
"$tcmalloc_dir/src/windows/port.cc",
"$tcmalloc_dir/src/windows/port.h",
]
# Compiling tcmalloc with -fvisibility=default is only necessary when
# not using the allocator shim, which provides the correct visibility
# annotations for those symbols which need to be exported (see
# //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
# //base/allocator/allocator_shim_internals.h for the definition of
# SHIM_ALWAYS_EXPORT).
if (!use_allocator_shim) {
configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
ldflags = [
# Don't let linker rip this symbol out, otherwise the heap&cpu
# profilers will not initialize properly on startup.
"-Wl,-uIsHeapProfilerRunning,-uProfilerStart",
# Do the same for heap leak checker.
"-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi",
"-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl",
"-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv",
]
}
# Make sure the allocation library is optimized as much as possible when
# we're in release mode.
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
deps += [ "//base/third_party/dynamic_annotations" ]
}
} # use_allocator == "tcmalloc"
buildflag_header("buildflags") {
header = "buildflags.h"
_use_partition_alloc = use_allocator == "partition"
_use_tcmalloc = use_allocator == "tcmalloc"
assert(use_allocator_shim || !_use_partition_alloc,
"Partition alloc requires the allocator shim")
flags = [
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_TCMALLOC=$_use_tcmalloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc",
]
}
# Used to shim malloc symbols on Android. see //base/allocator/README.md.
config("wrap_malloc_symbols") {
ldflags = [
"-Wl,-wrap,calloc",
"-Wl,-wrap,free",
"-Wl,-wrap,malloc",
"-Wl,-wrap,memalign",
"-Wl,-wrap,posix_memalign",
"-Wl,-wrap,pvalloc",
"-Wl,-wrap,realloc",
"-Wl,-wrap,valloc",
# <stdlib.h> functions
"-Wl,-wrap,realpath",
# <string.h> functions
"-Wl,-wrap,strdup",
"-Wl,-wrap,strndup",
# <unistd.h> functions
"-Wl,-wrap,getcwd",
# <stdio.h> functions
"-Wl,-wrap,asprintf",
"-Wl,-wrap,vasprintf",
]
}

View File

@ -0,0 +1,3 @@
monorail {
component: "Internals"
}

View File

@ -0,0 +1,3 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org

View File

@ -0,0 +1,188 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.
Bear in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
- Large parts of the renderer (Blink) use two home-brewed allocators,
PartitionAlloc and BlinkGC (Oilpan).
- Some subsystems, such as the V8 JavaScript engine, handle memory management
autonomously.
- Various parts of the codebase use abstractions such as `SharedMemory` or
`DiscardableMemory` which, similarly to the above, have their own page-level
memory management.
Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra-hooks which services calls to malloc/new. The relevant
build-time flags involved are `use_allocator` and `use_allocator_shim`.
The default choices are as follows:
**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.
**Linux Desktop / CrOS**
`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
to fall back to the system (Glibc) symbols.
**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
In addition, when building for `asan` / `msan` both the allocator and the shim
layer are disabled.
Layering and build deps
-----------------------
The `allocator` target provides both the source files for tcmalloc (where
applicable) and the linker flags required for the Windows shim layer.
The `base` target is (almost) the only one depending on `allocator`. No other
targets should depend on it, with the exception of the very few executables /
dynamic libraries that don't depend, either directly or indirectly, on `base`
within the scope of a linker unit.
More importantly, **no other place outside of `/base` should depend on the
specific allocator** (e.g., directly include `third_party/tcmalloc`).
If such a functional dependency is required that should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`)
**Why `base` depends on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.
Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) get automatically the correct
set of linker flags to pull in tcmalloc or the Windows shim-layer.
Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.
The tcmalloc library originates outside of Chromium and exists in
`../../third_party/tcmalloc` (currently, the actual location is defined in the
allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
track Chromium-specific changes independently from upstream changes.
The general intent is to push local changes upstream so that over
time we no longer need any forked files.
Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
a central place.
- Full documentation: [Allocator shim design doc][url-allocator-shim].
- Current state: Available and enabled by default on Android, CrOS, Linux,
Mac OS and Windows.
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
- Build-time flag: `use_allocator_shim`.
**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+ +-----------------------+ +----------------+
| malloc & friends | -> | shim layer | -> | Routing to |
| symbols definition | | implementation | | allocator |
+-------------------------+ +-----------------------+ +----------------+
| - libc symbols (malloc, | | - Security checks | | - tcmalloc |
| calloc, free, ...) | | - Chain of dispatchers| | - glibc |
| - C++ symbols (operator | | that can intercept | | - Android |
| new, delete, ...) | | and override | | bionic |
| - glibc weak symbols | | allocations | | - WinHeap |
| (__libc_malloc, ...) | +-----------------------+ +----------------+
+-------------------------+
```
**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.
*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols, that we
can override in `allocator_shim_override_ucrt_symbols_win.h`.
*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third party libraries. Symbol resolution on Linux is a breadth first search that starts from the root link unit, that is the executable
(see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats Specification).
Additionally, when tcmalloc is the default allocator, some extra glibc symbols
are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
reasons explained in that file.
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).
*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case, the approach is instead to wrap symbol resolution at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
The use of this wrapping flag causes:
- All references to allocator symbols in the Chrome codebase to be rewritten as
references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
route allocator calls inside the shim layer.
- The reference to the original `malloc` symbols (which typically is defined by
the system's libc.so) are accessible via the special `__real_malloc` and
friends symbols (which will be relocated, at load time, against `malloc`).
In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
(using the `InsertAllocatorDispatch` API). They can intercept and override
allocator calls.
- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
This happens inside `allocator_shim.cc`
**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.
Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing

View File

@ -0,0 +1,89 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/chromecast_build.gni")
import("//build/config/sanitizers/sanitizers.gni")
# Sanitizers replace the allocator, don't use our own.
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan

# Platforms on which PartitionAlloc-as-malloc must stay disabled:
# - Windows: shims don't work for component builds and debug CRT is not
#   compatible, see below.
# - Android: symbol wrapping is not universal for component builds.
# - Chromecast on Android: causes issues with crash reporting, see b/178423326.
_disable_partition_alloc =
    (is_win && (is_component_build || is_debug)) ||
    (is_android && is_component_build) || (is_android && is_chromecast) ||
    (is_linux && is_component_build)

# Platforms where PartitionAlloc may be selected as the default allocator.
_is_partition_alloc_platform = is_android || is_win || is_linux

# The debug CRT on Windows has some debug features that are incompatible with
# the shim. NaCl in particular does seem to link some binaries statically
# against the debug CRT with "is_nacl=false".
if ((is_linux || is_chromeos || is_android || is_apple ||
     (is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
  _default_use_allocator_shim = true
} else {
  _default_use_allocator_shim = false
}

# Pick the default allocator: PartitionAlloc where supported, otherwise
# tcmalloc on desktop Linux/CrOS, otherwise the system allocator.
if (_default_use_allocator_shim && _is_partition_alloc_platform &&
    !_disable_partition_alloc) {
  _default_allocator = "partition"
} else if (is_android || is_apple || _is_using_sanitizers || is_win ||
           is_fuchsia || ((is_linux || is_chromeos) && target_cpu == "arm64") ||
           (is_cast_audio_only && target_cpu == "arm")) {
  # Temporarily disable tcmalloc on arm64 linux to get rid of compilation
  # errors.
  _default_allocator = "none"
} else {
  _default_allocator = "tcmalloc"
}

declare_args() {
  # Memory allocator to use. Set to "none" to use default allocator.
  use_allocator = _default_allocator

  # Causes all the allocations to be routed via allocator_shim.cc.
  use_allocator_shim = _default_use_allocator_shim

  # Whether PartitionAlloc should be available for use or not.
  # true makes PartitionAlloc linked to the executable or shared library and
  # makes it available for use, but it doesn't mean that the default allocator
  # is PartitionAlloc.  PartitionAlloc may or may not be the default allocator.
  #
  # |use_allocator = "partition"| makes PartitionAlloc the default allocator
  # but it's effective only when |use_partition_alloc = true|.
  #
  # TODO(lizeb, yukishiino): Determine if |use_partition_alloc| is necessary or
  # not, and redesign or remove the flag accordingly.  We may want to assert a
  # possible conflict between |use_allocator = "partition"| and
  # |use_partition_alloc = true| rather than prioritizing use_partition_alloc.
  use_partition_alloc = !is_ios  # Never use PartitionAlloc on iOS.
}

if (!use_partition_alloc && use_allocator == "partition") {
  # If there is a conflict, prioritize |use_partition_alloc| over
  # |use_allocator|.
  use_allocator = "none"
}

# Sanity-check the final configuration.
assert(use_allocator == "none" || use_allocator == "tcmalloc" ||
       use_allocator == "partition")
assert(!is_win || use_allocator != "tcmalloc",
       "Tcmalloc doesn't work on Windows.")
assert(!is_mac || use_allocator != "tcmalloc",
       "Tcmalloc doesn't work on macOS.")
assert(!is_ios || use_allocator != "tcmalloc", "Tcmalloc doesn't work on iOS.")

assert(
    !use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
        is_apple,
    "use_allocator_shim works only on Android, iOS, Linux, macOS, and Windows.")

if (is_win && use_allocator_shim) {
  assert(!is_component_build,
         "The allocator shim doesn't work for the component build on Windows.")
}

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_check.h"
#include "base/allocator/buildflags.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/allocator/winheap_stubs_win.h"
#endif
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <malloc.h>
#endif
#if defined(OS_APPLE)
#include "base/allocator/allocator_interception_mac.h"
#endif
namespace base {
namespace allocator {

// Returns true if the allocator selected at build time is the one actually
// servicing heap allocations at runtime. Each platform uses a different
// runtime probe; platforms with no probe optimistically return true.
bool IsAllocatorInitialized() {
#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
  // Set by allocator_shim_override_ucrt_symbols_win.h when the
  // shimmed _set_new_mode() is called.
  return g_is_win_shim_layer_initialized;
#elif (defined(OS_LINUX) || defined(OS_CHROMEOS)) && \
    BUILDFLAG(USE_TCMALLOC) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // From third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h.
  // TODO(primiano): replace with an include once base can depend on allocator.
#define TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC 0xbeef42
  // tcmalloc's mallopt() echoes this magic opcode back; the system (glibc)
  // mallopt() does not, so a mismatch means tcmalloc is not in charge.
  return (mallopt(TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC, 0) ==
          TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC);
#elif defined(OS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // From allocator_interception_mac.mm.
  return base::allocator::g_replaced_default_zone;
#else
  // No runtime check available on this configuration.
  return true;
#endif
}

}  // namespace allocator
}  // namespace base

View File

@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Fix: the include guard doubled the "ALLOCATOR_" path component
// (BASE_ALLOCATOR_ALLOCATOR_ALLOCATOR_CHECK_H_). Per the convention used by
// the sibling headers in this directory, the guard mirrors the file path
// base/allocator/allocator_check.h.
#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

#include "base/base_export.h"

namespace base {
namespace allocator {

// Returns true if the allocator selected at build time is the one actually
// servicing allocations at runtime (e.g. tcmalloc's mallopt() marker on
// Linux/CrOS, the shim flag on Windows, the replaced default malloc zone on
// Apple platforms). Implemented in allocator_check.cc.
BASE_EXPORT bool IsAllocatorInitialized();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

View File

@ -0,0 +1,77 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"
#if BUILDFLAG(USE_TCMALLOC)
#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
#endif
namespace base {
namespace allocator {

// Asks tcmalloc to return unused pages to the OS; no-op for other allocators.
void ReleaseFreeMemory() {
#if BUILDFLAG(USE_TCMALLOC)
  ::MallocExtension::instance()->ReleaseFreeMemory();
#endif
}
// Reads allocator property |name| into |value|. Only tcmalloc implements
// properties; on other allocators this always returns false (unknown).
bool GetNumericProperty(const char* name, size_t* value) {
#if BUILDFLAG(USE_TCMALLOC)
  return ::MallocExtension::instance()->GetNumericProperty(name, value);
#else
  return false;
#endif
}
// Writes allocator property |name|. Only tcmalloc implements properties; on
// other allocators this always returns false (unknown/read-only).
bool SetNumericProperty(const char* name, size_t value) {
#if BUILDFLAG(USE_TCMALLOC)
  return ::MallocExtension::instance()->SetNumericProperty(name, value);
#else
  return false;
#endif
}
// Writes a tcmalloc heap sample (pprof-compatible text) into |writer|;
// leaves |writer| untouched for other allocators.
void GetHeapSample(std::string* writer) {
#if BUILDFLAG(USE_TCMALLOC)
  ::MallocExtension::instance()->GetHeapSample(writer);
#endif
}
// True iff tcmalloc's heap profiler is active. Requires both tcmalloc and a
// build with ENABLE_PROFILING; false in every other configuration.
bool IsHeapProfilerRunning() {
#if BUILDFLAG(USE_TCMALLOC) && defined(ENABLE_PROFILING)
  return ::IsHeapProfilerRunning();
#else
  return false;
#endif
}
// Installs (or clears, when passed null) tcmalloc-level hooks that fire on
// every allocation and free. Only one hook of each kind may be installed at a
// time; the DCHECKs below catch an attempt to overwrite an existing hook.
// No-op for non-tcmalloc allocators.
void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
// TODO(sque): Use allocator shim layer instead.
#if BUILDFLAG(USE_TCMALLOC)
  // Make sure no hooks get overwritten.
  auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
  if (alloc_hook)
    DCHECK(!prev_alloc_hook);

  auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
  if (free_hook)
    DCHECK(!prev_free_hook);
#endif
}
// Unwinds the caller's stack via tcmalloc's MallocHook (skipping 0 extra
// frames) into |stack|, returning the number of frames captured. Returns 0
// (no stack information) for other allocators.
int GetCallStack(void** stack, int max_stack_size) {
#if BUILDFLAG(USE_TCMALLOC)
  return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
#else
  return 0;
#endif
}

}  // namespace allocator
}  // namespace base

View File

@ -0,0 +1,67 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

// Allocator-agnostic entry points into allocator-specific functionality
// (implemented in allocator_extension.cc). On builds without tcmalloc most of
// these are no-ops or return "not supported" values.

#include <stddef.h>  // for size_t

#include <string>

#include "base/base_export.h"
#include "build/build_config.h"

namespace base {
namespace allocator {

// Callback types for alloc and free.
using AllocHookFunc = void (*)(const void*, size_t);
using FreeHookFunc = void (*)(const void*);

// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();

// Get the named property's |value|. Returns true if the property is known.
// Returns false if the property is not a valid property name for the current
// allocator implementation.
// |name| or |value| cannot be NULL
BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);

// Set the named property's |value|. Returns true if the property is known and
// writable. Returns false if the property is not a valid property name for the
// current allocator implementation, or is not writable. |name| cannot be NULL.
BASE_EXPORT bool SetNumericProperty(const char* name, size_t value);

// Outputs to |writer| a sample of live objects and the stack traces
// that allocated these objects. The format of the returned output
// is equivalent to the output of the heap profiler and can
// therefore be passed to "pprof".
// NOTE: by default, the allocator does not do any heap sampling, and this
// function will always return an empty sample. To get useful
// data from GetHeapSample, you must also set the numeric property
// "tcmalloc.sampling_period_bytes" to a value such as 524288.
BASE_EXPORT void GetHeapSample(std::string* writer);

BASE_EXPORT bool IsHeapProfilerRunning();

// Register callbacks for alloc and free. Can only store one callback at a time
// for each of alloc and free.
BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);

// Attempts to unwind the call stack from the current location where this
// function is being called from. Must be called from a hook function registered
// by calling SetSingle{Alloc,Free}Hook, directly or indirectly.
//
// Arguments:
//   stack:          pointer to a pre-allocated array of void*'s.
//   max_stack_size: indicates the size of the array in |stack|.
//
// Returns the number of call stack frames stored in |stack|, or 0 if no call
// stack information is available.
BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

View File

@ -0,0 +1,61 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_

// Interface for intercepting macOS/iOS malloc zones (implemented in
// allocator_interception_mac.mm).

#include <stddef.h>

#include "base/base_export.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

struct MallocZoneFunctions;

// This initializes AllocatorDispatch::default_dispatch by saving pointers to
// the functions in the current default malloc zone. This must be called before
// the default malloc zone is changed to have its intended effect.
void InitializeDefaultDispatchToMacAllocator();

// Saves the function pointers currently used by the default zone.
void StoreFunctionsForDefaultZone();

// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
void StoreFunctionsForAllZones();

// For all malloc zones that have been stored, replace their functions with
// |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);

// True once the default zone's functions have been replaced; read by
// allocator_check.cc to verify the interception took effect.
extern bool g_replaced_default_zone;

// Calls the original implementation of malloc/calloc prior to interception.
bool UncheckedMallocMac(size_t size, void** result);
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);

// Intercepts calls to default and purgeable malloc zones. Intercepts Core
// Foundation and Objective-C allocations.
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();

// Updates all malloc zones to use their original functions.
// Also calls ClearAllMallocZonesForTesting.
BASE_EXPORT void UninterceptMallocZonesForTesting();

// Periodically checks for, and shims new malloc zones. Stops checking after 1
// minute.
BASE_EXPORT void PeriodicallyShimNewMallocZones();

// Exposed for testing.
BASE_EXPORT void ShimNewMallocZones();
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
                                      const MallocZoneFunctions* functions);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_

View File

@ -0,0 +1,579 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains all the logic necessary to intercept allocations on
// macOS. "malloc zones" are an abstraction that allows the process to intercept
// all malloc-related functions. There is no good mechanism [short of
// interposition] to determine new malloc zones are added, so there's no clean
// mechanism to intercept all malloc zones. This file contains logic to
// intercept the default and purgeable zones, which always exist. A cursory
// review of Chrome seems to imply that non-default zones are almost never used.
//
// This file also contains logic to intercept Core Foundation and Objective-C
// allocations. The implementations forward to the default malloc zone, so the
// only reason to intercept these calls is to re-label OOM crashes with slightly
// more details.
#include "base/allocator/allocator_interception_mac.h"
#include <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <errno.h>
#include <mach/mach.h>
#import <objc/runtime.h>
#include <stddef.h>
#include <new>
#include "base/allocator/buildflags.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "base/bind.h"
#include "base/bits.h"
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/process/memory.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"
#if defined(OS_IOS)
#include "base/ios/ios_util.h"
#else
#include "base/mac/mac_util.h"
#endif
namespace base {
namespace allocator {

// Set to true by ReplaceFunctionsForStoredZones() once zone functions have
// been swapped; read by allocator_check.cc.
bool g_replaced_default_zone = false;

namespace {

// Whether InterceptAllocationsMac() has already installed the OOM-killer
// hooks; guards against double installation.
bool g_oom_killer_enabled;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         vm_address_t* reprotection_start,
                         vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  // Look up the VM region containing the zone to learn its current
  // protection. vm_region_64 may round |reprotection_start| down to the
  // region base.
  kern_return_t result =
      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
                   VM_REGION_BASIC_INFO_64,
                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
  MACH_CHECK(result == KERN_SUCCESS, result) << "vm_region_64";

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See
  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
  mach_port_deallocate(mach_task_self(), unused);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 10 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 10.
  CHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
                          reinterpret_cast<vm_address_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    // Remember the original protection so the caller can restore it, then
    // make the region writable.
    *reprotection_value = info.protection;
    result =
        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
                   false, info.protection | VM_PROT_WRITE);
    MACH_CHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}
#if !defined(ADDRESS_SANITIZER)

// Saved function tables for the default and purgeable malloc zones, captured
// before interception. Each oom_killer_* wrapper below forwards to the saved
// original and terminates the process (TerminateBecauseOutOfMemory) when the
// underlying allocation fails for a non-zero request.
MallocZoneFunctions g_old_zone;
MallocZoneFunctions g_old_purgeable_zone;

// === Default malloc zone ===

void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  void* result = g_old_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

// === Purgeable malloc zone (same wrappers, routed to its saved table) ===

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)
#endif // !defined(ADDRESS_SANITIZER)
#if !defined(ADDRESS_SANITIZER)

// === Core Foundation CFAllocators ===

// Whether it is safe to poke at a CFAllocator's internal context on this OS
// version. Presumably the struct layout has only been verified up to the
// versions checked here -- NOTE(review): confirm when new OS versions ship.
bool CanGetContextForCFAllocator() {
#if defined(OS_IOS)
  return !base::ios::IsRunningOnOrLater(14, 0, 0);
#else
  return !base::mac::IsOSLaterThan11_DontCallThis();
#endif
}

// Returns a mutable pointer to |allocator|'s embedded context by
// reinterpreting it as the known internal struct layout.
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}

// Saved allocate callbacks for the three stock CFAllocators; the wrappers
// below forward to them and terminate on allocation failure.
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)
// === Cocoa NSObject allocation ===

// Saved original +[NSObject allocWithZone:] implementation; the wrapper below
// forwards to it and terminates the process if it returns nil.
typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    TerminateBecauseOutOfMemory(0);
  return result;
}

// Restores |zone|'s original function table (if one was stored) by writing
// the saved functions back over the intercepted ones.
void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
  if (!IsMallocZoneAlreadyStored(chrome_zone))
    return;
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  ReplaceZoneFunctions(chrome_zone, &functions);
}

}  // namespace
// Allocates |size| bytes via the original (pre-interception) malloc when one
// has been saved, or plain malloc() otherwise, storing the pointer in
// |*result|. Returns true iff the allocation succeeded. Bypasses the
// OOM-killer wrappers, so a failure returns false instead of terminating.
bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  *result = g_old_zone.malloc ? g_old_zone.malloc(malloc_default_zone(), size)
                              : malloc(size);
#endif  // defined(ADDRESS_SANITIZER)
  return *result != NULL;
}
// Zero-allocates |num_items| * |size| bytes via the original
// (pre-interception) calloc when one has been saved, or plain calloc()
// otherwise, storing the pointer in |*result|. Returns true iff the
// allocation succeeded; never terminates on failure.
bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  *result = g_old_zone.calloc
                ? g_old_zone.calloc(malloc_default_zone(), num_items, size)
                : calloc(num_items, size);
#endif  // defined(ADDRESS_SANITIZER)
  return *result != NULL;
}
// Captures the zone functions of every existing malloc zone so that the
// shim's default dispatch can forward to the original macOS allocator.
void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}
// Saves the default malloc zone's function table for later forwarding.
void StoreFunctionsForDefaultZone() {
  StoreMallocZone(reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()));
}
// Saves the zone functions of every malloc zone registered with the task.
void StoreFunctionsForAllZones() {
  // This ensures that the default zone is always at the front of the array,
  // which is important for performance.
  StoreFunctionsForDefaultZone();
  vm_address_t* zones;
  unsigned int count;
  // Pass nullptr for the memory reader, matching
  // ReplaceFunctionsForStoredZones() (previously a literal 0 was used here).
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    StoreMallocZone(zone);
  }
}
// Installs |functions| into every malloc zone that needs replacing (per
// DoesMallocZoneNeedReplacing()), handling the default zone explicitly.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }
  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  // NOTE(review): set unconditionally, even when no zone needed replacing
  // above — presumably callers only invoke this when replacement is intended.
  g_replaced_default_zone = true;
}
// Installs OOM-killing wrappers around the C malloc zones, the Core
// Foundation CFAllocators, and +[NSObject allocWithZone:], so that a failed
// allocation terminates the process instead of returning null. Idempotent.
void InterceptAllocationsMac() {
  if (g_oom_killer_enabled)
    return;
  g_oom_killer_enabled = true;
  // === C malloc/calloc/valloc/realloc/posix_memalign ===
  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see malloc_zone_malloc() in
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
  // for details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.
#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;
    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif  // !defined(ADDRESS_SANITIZER)
  // === C malloc_zone_batch_malloc ===
  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that callers
  // must always be expecting to receive less memory than was requested, even
  // in situations where memory pressure is not a concern. Finally, the only
  // public interface to batch_malloc is malloc_zone_batch_malloc, which is
  // specific to the system's malloc implementation. It's unlikely that
  // anyone's even heard of it.
#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===
  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.
  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";
  bool cf_allocator_internals_known = CanGetContextForCFAllocator();
  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;
    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;
    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif  // !defined(ADDRESS_SANITIZER)
  // === Cocoa NSObject allocation ===
  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].
  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
// Test-only: restores the original function tables of the default zone and
// every registered zone, then clears all stored zone functions.
void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());
  vm_address_t* zones;
  unsigned int count;
  // nullptr memory reader, consistent with ReplaceFunctionsForStoredZones()
  // (previously a literal 0 was used here).
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }
  ClearAllMallocZonesForTesting();
}
namespace {
// Shims any malloc zones created since the last call, then reschedules itself
// with an exponentially growing delay until |end_time| has passed. This
// catches zones registered after startup.
void ShimNewMallocZonesAndReschedule(base::Time end_time,
                                     base::TimeDelta delay) {
  ShimNewMallocZones();
  if (base::Time::Now() > end_time)
    return;
  // Double the delay for the next round, but post this round with the
  // current |delay|.
  base::TimeDelta next_delay = delay * 2;
  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
      delay);
}
}  // namespace
void PeriodicallyShimNewMallocZones() {
base::Time end_time = base::Time::Now() + base::TimeDelta::FromMinutes(1);
base::TimeDelta initial_delay = base::TimeDelta::FromSeconds(1);
ShimNewMallocZonesAndReschedule(end_time, initial_delay);
}
// Stores functions for any zones created since the last call, then installs
// the shim into every stored zone.
void ShimNewMallocZones() {
  StoreFunctionsForAllZones();
  // Use the functions for the default zone as a template to replace those
  // new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  DCHECK(IsMallocZoneAlreadyStored(default_zone));
  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}
// Installs |functions| into |zone|, temporarily lifting any VM protection
// covering the zone structure and restoring it afterwards.
void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(zone, &reprotection_start, &reprotection_length,
                      &reprotection_value);
  // These five callbacks are required of every replacement.
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  // The remaining callbacks are optional; only overwrite the zone's entry
  // when a replacement was provided (and, where relevant, the zone version
  // is recent enough to have the field).
  if (functions->batch_malloc)
    zone->batch_malloc = functions->batch_malloc;
  if (functions->batch_free)
    zone->batch_free = functions->batch_free;
  if (functions->size)
    zone->size = functions->size;
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }
  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}
} // namespace allocator
} // namespace base

// ===========================================================================
// (import artifact "View File / @ -0,0 +1,398 @@": a new source file,
// base/allocator/allocator_shim.cc, begins below.)
// ===========================================================================
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include <errno.h>
#include <atomic>
#include <new>
#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#if !defined(OS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif
#if defined(OS_APPLE)
#include <malloc/malloc.h>
#include "base/allocator/allocator_interception_mac.h"
#endif
// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.
namespace {
// Head of the singly linked dispatch chain; the most recently inserted
// dispatch is first. Read on every allocation, hence the relaxed loads.
std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
    &base::allocator::AllocatorDispatch::default_dispatch};
// When true, malloc-family failures consult the std::new_handler before
// giving up (see SetCallNewHandlerOnMallocFailure()).
bool g_call_new_handler_on_malloc_failure = false;
ALWAYS_INLINE size_t GetCachedPageSize() {
  // Lazily cached; a racy double-initialization is benign because every
  // writer stores the same value.
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}
// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if defined(OS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled
  // and we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}
ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}
}  // namespace
namespace base {
namespace allocator {
// When |value| is true, failed malloc-family calls invoke the
// std::new_handler before returning nullptr (see ShimMalloc() and friends).
void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}
// Allocates |size| bytes or returns nullptr; never consults the
// std::new_handler.
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}
// Inserts |dispatch| at the head of the chain via compare-and-swap.
// Thread-safe w.r.t. concurrent insertions; callers must insert a given
// dispatch at most once.
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  // kMaxRetries is a compile-time constant (was a mutable local despite the
  // k-prefix naming).
  constexpr size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;
    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }
  CHECK(false);  // Too many retries, this shouldn't happen.
}
// Test-only: pops |dispatch| from the chain. Only the current head may be
// removed (see the rationale in allocator_shim.h).
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}
} // namespace allocator
} // namespace base
// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {
// The general pattern for allocations is:
// - Try to allocate, if succeeded return the pointer.
// - If the allocation failed:
// - Call the std::new_handler if it was a C++ allocation.
// - Call the std::new_handler if it was a malloc() (or calloc() or similar)
// AND SetCallNewHandlerOnMallocFailure(true).
// - If the std::new_handler is NOT set just return nullptr.
// - If the std::new_handler is set:
// - Assume it will abort() if it fails (very likely the new_handler will
// just suicide printing a message).
// - Assume it did succeed if it returns, in which case reattempt the alloc.
// operator new entry point: retries through the std::new_handler until the
// allocation succeeds or no handler is installed.
ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if defined(OS_APPLE)
    // On Apple platforms the default malloc zone is threaded through the
    // chain as the allocation context.
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}
// Nothrow operator new: a failure returns nullptr without consulting the
// std::new_handler.
ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if defined(OS_APPLE)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}
// Aligned operator new: same retry-through-new_handler policy as ShimCppNew.
ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if defined(OS_APPLE)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}
// operator delete entry point.
ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if defined(OS_APPLE)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}
// malloc(): retries through the std::new_handler only when
// SetCallNewHandlerOnMallocFailure(true) was requested.
ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
// calloc(): |n| elements of |size| zero-initialized bytes; same retry policy
// as ShimMalloc().
ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
// memalign()-style aligned allocation; same retry policy as ShimMalloc().
ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc. Alignment must be a power of two and a multiple of
  // sizeof(void*).
  if (((alignment % sizeof(void*)) != 0) ||
      !base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  // 0 on success, ENOMEM on allocation failure (|*res| is set either way).
  return ptr ? 0 : ENOMEM;
}
// valloc() == page-aligned malloc().
ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}
// pvalloc(): page-aligned allocation with the size rounded up to a whole
// number of pages.
ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  const size_t page_size = GetCachedPageSize();
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = page_size;
  } else {
    size = (size + page_size - 1) & ~(page_size - 1);
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(page_size, size, nullptr);
}
// free(): forwards straight down the dispatch chain.
ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  dispatch->free_function(dispatch, address, context);
}
// Best-effort estimate of the allocated size of |address|; may return zero.
ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}
// batch_malloc / batch_free / free_definite_size are macOS/iOS malloc zone
// extensions; they forward straight down the chain.
ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}
ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}
ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}
// _aligned_malloc (Windows CRT); same retry policy as ShimMalloc().
ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}
// _aligned_free (Windows CRT): forwards straight down the dispatch chain.
ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const dispatch = GetChainHead();
  dispatch->aligned_free_function(dispatch, address, context);
}
} // extern "C"
#if !defined(OS_WIN) && !defined(OS_APPLE)
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS where the malloc intercept is deep enough that it
// also catches the cpp calls.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif
#if defined(OS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif defined(OS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif defined(OS_APPLE)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif
// In the case of tcmalloc we also want to plumb into the glibc hooks
// to avoid that allocations made in glibc itself (e.g., strdup()) get
// accidentally performed on the glibc heap.
//
// More details:
// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that both
// the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, both tcmalloc (in
// libc_override_glibc.h, see in third_party/tcmalloc) and LLVM for LSAN use the
// same mechanism.
#if defined(LIBC_GLIBC) && \
(BUILDFLAG(USE_TCMALLOC) || BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif
#if defined(OS_APPLE)
namespace base {
namespace allocator {
// Turns the allocator shim on at runtime on macOS (see allocator_shim.h).
void InitializeAllocatorShim() {
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();
  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
} // namespace allocator
} // namespace base
#endif
// Cross-checks.
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif
#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
(defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif

// ===========================================================================
// (import artifact "View File / @ -0,0 +1,178 @@": a new source file,
// base/allocator/allocator_shim.h, begins below.)
// ===========================================================================
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#include <stddef.h>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/base_export.h"
#include "build/build_config.h"
namespace base {
namespace allocator {
// Allocator Shim API. Allows to:
// - Configure the behavior of the allocator (what to do on OOM failures).
// - Install new hooks (AllocatorDispatch) in the allocator chain.
// When this shim layer is enabled, the route of an allocation is as-follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
// The override_* headers define the symbols required to intercept calls to
// malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Routing allocation calls to the shim:
// The headers above route the calls to the internal ShimMalloc(), ShimFree(),
// ShimCppNew() etc. methods defined in allocator_shim.cc.
// These methods will: (1) forward the allocation call to the front of the
// AllocatorDispatch chain. (2) perform security hardenings (e.g., might
// call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
// It is a singly linked list where each element is a struct with function
// pointers (|malloc_function|, |free_function|, etc). Normally the chain
// consists of a single AllocatorDispatch element, herein called
// the "default dispatch", which is statically defined at build time and
// ultimately routes the calls to the actual allocator defined by the build
// config (tcmalloc, glibc, ...).
//
// It is possible to dynamically insert further AllocatorDispatch stages
// to the front of the chain, for debugging / profiling purposes.
//
// All the functions must be thread safe. The shim does not enforce any
// serialization. This is to route to thread-aware allocators (e.g, tcmalloc)
// wihout introducing unnecessary perf hits.
// One element of the dispatch chain: a table of function pointers through
// which every shim entry point forwards its call.
struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self,
                        size_t size,
                        void* context);
  // Like AllocFn, but must never invoke the std::new_handler on failure.
  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
                                 size_t size,
                                 void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                       size_t n,
                                       size_t size,
                                       void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self,
                               size_t alignment,
                               size_t size,
                               void* context);
  using ReallocFn = void*(const AllocatorDispatch* self,
                          void* address,
                          size_t size,
                          void* context);
  using FreeFn = void(const AllocatorDispatch* self,
                      void* address,
                      void* context);
  // Returns the best available estimate for the actual amount of memory
  // consumed by the allocation |address|. If possible, this should include
  // heap overhead or at least a decent estimate of the full cost of the
  // allocation. If no good estimate is possible, returns zero.
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address,
                                   void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
                                 size_t size,
                                 void** results,
                                 unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self,
                           void** to_be_freed,
                           unsigned num_to_be_freed,
                           void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
                                  void* ptr,
                                  size_t size,
                                  void* context);
  using AlignedMallocFn = void*(const AllocatorDispatch* self,
                                size_t size,
                                size_t alignment,
                                void* context);
  using AlignedReallocFn = void*(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 size_t alignment,
                                 void* context);
  using AlignedFreeFn = void(const AllocatorDispatch* self,
                             void* address,
                             void* context);
  // NOTE: default_dispatch definitions use positional aggregate
  // initialization; member order below must not change.
  AllocFn* const alloc_function;
  AllocUncheckedFn* const alloc_unchecked_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  // batch_malloc, batch_free, and free_definite_size are specific to the OSX
  // and iOS allocators.
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;
  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
  // Windows allocator.
  AlignedMallocFn* const aligned_malloc_function;
  AlignedReallocFn* const aligned_realloc_function;
  AlignedFreeFn* const aligned_free_function;
  // Next element in the chain, towards the default dispatch.
  const AllocatorDispatch* next;
  // |default_dispatch| is statically defined by one (and only one) of the
  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
  // configuration.
  static const AllocatorDispatch default_dispatch;
};
// When true makes malloc behave like new, w.r.t calling the new_handler if
// the allocation fails (see set_new_mode() in Windows).
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
// regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);
// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
// The callers have responsibility for inserting a single dispatch no more
// than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
// removal of arbitrary elements from a singly linked list would require a lock
// in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OS_WIN)
// Configures the allocator for the caller's allocation domain. Allocations that
// take place prior to this configuration step will succeed, but will not
// benefit from its one-time mitigations. As such, this function must be called
// as early as possible during startup.
BASE_EXPORT void ConfigurePartitionAlloc();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OS_WIN)
#if defined(OS_APPLE)
// On macOS, the allocator shim needs to be turned on during runtime.
BASE_EXPORT void InitializeAllocatorShim();
#endif  // defined(OS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Runtime configuration hooks for PartitionAlloc-as-malloc builds.
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
BASE_EXPORT void ReconfigurePartitionAllocLazyCommit();
BASE_EXPORT void ConfigurePartitionRefCountSupport(bool enable_ref_count);
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_ALLOW_PCSCAN
BASE_EXPORT void EnablePCScan();
#endif
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_

// ===========================================================================
// (import artifact "View File / @ -0,0 +1,89 @@": a new source file,
// base/allocator/allocator_shim_default_dispatch_to_glibc.cc, begins below.)
// ===========================================================================
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include "base/compiler_specific.h"
#include <dlfcn.h>
#include <malloc.h>
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to libc functions.
// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
// Aliases exported by glibc for the underlying allocator implementation.
// These resolve to the real glibc functions even when the public malloc
// symbols are interposed by this shim.
extern "C" {
void* __libc_malloc(size_t size);
void* __libc_calloc(size_t n, size_t size);
void* __libc_realloc(void* address, size_t size);
void* __libc_memalign(size_t alignment, size_t size);
void __libc_free(void* ptr);
}  // extern "C"
namespace {
using base::allocator::AllocatorDispatch;
// Each Glibc* function forwards directly to the corresponding __libc_*
// alias; the dispatch |self| pointer and |context| are unused.
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __libc_malloc(size);
}
void* GlibcCalloc(const AllocatorDispatch*,
                  size_t n,
                  size_t size,
                  void* context) {
  return __libc_calloc(n, size);
}
void* GlibcRealloc(const AllocatorDispatch*,
                   void* address,
                   size_t size,
                   void* context) {
  return __libc_realloc(address, size);
}
void* GlibcMemalign(const AllocatorDispatch*,
                    size_t alignment,
                    size_t size,
                    void* context) {
  return __libc_memalign(alignment, size);
}
void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
  __libc_free(address);
}
NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
  // resolve it instead. This should be safe because glibc (and hence dlfcn)
  // does not use malloc_size internally and so there should not be a risk of
  // recursion.
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  static MallocUsableSizeFunction fn_ptr =
      reinterpret_cast<MallocUsableSizeFunction>(
          dlsym(RTLD_NEXT, "malloc_usable_size"));
  // If resolution failed, report "no estimate available" (zero, per the
  // GetSizeEstimateFn contract in allocator_shim.h) instead of crashing on a
  // null function pointer.
  if (!fn_ptr)
    return 0;
  return fn_ptr(address);
}
}  // namespace
// Default dispatch table: every implemented hook forwards to the matching
// glibc alias. Entries left as nullptr fall back to the shim's generic
// emulation (see allocator_shim.cc).
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &GlibcMalloc, /* alloc_function */
    &GlibcMalloc, /* alloc_unchecked_function */
    &GlibcCalloc, /* alloc_zero_initialized_function */
    &GlibcMemalign, /* alloc_aligned_function */
    &GlibcRealloc, /* realloc_function */
    &GlibcFree, /* free_function */
    &GlibcGetSizeEstimate, /* get_size_estimate_function */
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    nullptr, /* aligned_malloc_function */
    nullptr, /* aligned_realloc_function */
    nullptr, /* aligned_free_function */
    nullptr, /* next */
};

View File

@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <malloc.h>
#include "base/allocator/allocator_shim.h"
#include "build/build_config.h"
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
// -Wl,-wrap,malloc approach (see README.md).
// The __real_X functions here are special symbols that the linker will relocate
// against the real "X" undefined symbol, so that __real_malloc becomes the
// equivalent of what an undefined malloc symbol reference would have been.
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
// which routes the __wrap_X functions into the shim.
// Declarations for the __real_X symbols produced by -Wl,-wrap. The linker
// resolves each __real_X against the real undefined "X" symbol, so the
// declared signatures must match the C library's exactly.
extern "C" {
void* __real_malloc(size_t);
void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
// free() returns void; declaring it as returning void* (as before) gives the
// wrapped symbol a mismatched prototype.
void __real_free(void*);
} // extern "C"
namespace {
using base::allocator::AllocatorDispatch;
// Forwards malloc() to the real libc symbol via the linker's __real_ alias.
void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
  void* ptr = __real_malloc(size);
  return ptr;
}
// Forwards calloc() to the real libc symbol.
void* RealCalloc(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  void* zeroed = __real_calloc(n, size);
  return zeroed;
}
// Forwards realloc() to the real libc symbol.
void* RealRealloc(const AllocatorDispatch*,
                  void* address,
                  size_t size,
                  void* context) {
  void* resized = __real_realloc(address, size);
  return resized;
}
// Forwards memalign() to the real libc symbol.
void* RealMemalign(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  void* aligned = __real_memalign(alignment, size);
  return aligned;
}
// Forwards free() to the real libc symbol.
void RealFree(const AllocatorDispatch*, void* address, void* context) {
  __real_free(address);
}
} // namespace
// Default dispatch table for the -Wl,-wrap build: forwards to the __real_
// libc symbols. Note that get_size_estimate_function is nullptr here (no
// __real_malloc_usable_size alias exists), so the shim reports "unknown".
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &RealMalloc, /* alloc_function */
    &RealMalloc, /* alloc_unchecked_function */
    &RealCalloc, /* alloc_zero_initialized_function */
    &RealMemalign, /* alloc_aligned_function */
    &RealRealloc, /* realloc_function */
    &RealFree, /* free_function */
    nullptr, /* get_size_estimate_function */
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    nullptr, /* aligned_malloc_function */
    nullptr, /* aligned_realloc_function */
    nullptr, /* aligned_free_function */
    nullptr, /* next */
};

View File

@ -0,0 +1,107 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <utility>
#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/malloc_zone_functions_mac.h"
namespace base {
namespace allocator {
namespace {
// Forwards malloc() to the malloc zone identified by |context|.
void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.malloc(zone, size);
}
// Forwards calloc() to the malloc zone identified by |context|.
void* CallocImpl(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.calloc(zone, n, size);
}
// Forwards memalign() to the malloc zone identified by |context|.
void* MemalignImpl(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.memalign(zone, alignment, size);
}
// Forwards realloc() to the malloc zone identified by |context|.
void* ReallocImpl(const AllocatorDispatch*,
                  void* ptr,
                  size_t size,
                  void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.realloc(zone, ptr, size);
}
// Forwards free() to the malloc zone identified by |context|.
void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free(zone, ptr);
}
// Returns the zone's size estimate for |ptr| (malloc_size semantics).
size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.size(zone, ptr);
}
// Forwards batch_malloc() to the zone; returns the number of allocations
// actually fulfilled.
unsigned BatchMallocImpl(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.batch_malloc(zone, size, results, num_requested);
}
// Forwards batch_free() to the zone.
void BatchFreeImpl(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.batch_free(zone, to_be_freed, num_to_be_freed);
}
// Forwards free_definite_size() to the zone (free with a known size hint).
void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  auto* zone = reinterpret_cast<struct _malloc_zone_t*>(context);
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free_definite_size(zone, ptr, size);
}
} // namespace
// Default dispatch table for macOS: forwards to the functions recorded for
// the target malloc zone (|context|). The batch and definite-size hooks are
// implemented here because malloc zones natively support them.
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &MallocImpl, /* alloc_function */
    &MallocImpl, /* alloc_unchecked_function */
    &CallocImpl, /* alloc_zero_initialized_function */
    &MemalignImpl, /* alloc_aligned_function */
    &ReallocImpl, /* realloc_function */
    &FreeImpl, /* free_function */
    &GetSizeEstimateImpl, /* get_size_estimate_function */
    &BatchMallocImpl, /* batch_malloc_function */
    &BatchFreeImpl, /* batch_free_function */
    &FreeDefiniteSizeImpl, /* free_definite_size_function */
    nullptr, /* aligned_malloc_function */
    nullptr, /* aligned_realloc_function */
    nullptr, /* aligned_free_function */
    nullptr, /* next */
};
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,484 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include <cstddef>
#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/bits.h"
#include "base/no_destructor.h"
#include "base/numerics/checked_math.h"
#include "base/partition_alloc_buildflags.h"
#include "build/build_config.h"
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <malloc.h>
#endif
using base::allocator::AllocatorDispatch;
namespace {
// We would usually make g_root a static local variable, as these are guaranteed
// to be thread-safe in C++11. However this does not work on Windows, as the
// initialization calls into the runtime, which is not prepared to handle it.
//
// To sidestep that, we implement our own equivalent to a local `static
// base::NoDestructor<base::ThreadSafePartitionRoot> root`.
//
// The ingredients are:
// - Placement new to avoid a static constructor, and a static destructor.
// - Double-checked locking to get the same guarantees as a static local
// variable.
// Lock for double-checked locking.
// Spin-lock flag guarding first-time initialization of |g_root_| (see
// Allocator() below for the double-checked-locking protocol).
std::atomic<bool> g_initialization_lock;
// The process-wide malloc partition root; null until first use.
std::atomic<base::ThreadSafePartitionRoot*> g_root_;
// Buffer for placement new, so no static constructor/destructor is needed.
alignas(base::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer[sizeof(base::ThreadSafePartitionRoot)];
// Original g_root_ if it was replaced by ConfigurePartitionRefCountSupport().
std::atomic<base::ThreadSafePartitionRoot*> g_original_root_(nullptr);
// Returns the process-wide malloc PartitionRoot, lazily constructing it on
// first call. Thread-safe via hand-rolled double-checked locking (see below
// for why a static local or base::Lock cannot be used here).
base::ThreadSafePartitionRoot* Allocator() {
  // Double-checked locking.
  //
  // The proper way to proceed is:
  //
  // auto* root = load_acquire(g_root);
  // if (!root) {
  //   ScopedLock initialization_lock;
  //   root = load_relaxed(g_root);
  //   if (root)
  //     return root;
  //   new_root = Create new root.
  //   release_store(g_root, new_root);
  // }
  //
  // We don't want to use a base::Lock here, so instead we use the
  // compare-and-exchange on a lock variable, but this provides the same
  // guarantees as a regular lock. The code could be made simpler as we have
  // stricter requirements, but we stick to something close to a regular lock
  // for ease of reading, as none of this is performance-critical anyway.
  //
  // If we boldly assume that initialization will always be single-threaded,
  // then we could remove all these atomic operations, but this seems a bit too
  // bold to try yet. Might be worth revisiting though, since this would remove
  // a memory barrier at each load. We could probably guarantee single-threaded
  // init by adding a static constructor which allocates (and hence triggers
  // initialization before any other thread is created).
  auto* root = g_root_.load(std::memory_order_acquire);
  // Fast path: already initialized.
  if (LIKELY(root))
    return root;
  bool expected = false;
  // Semantically equivalent to base::Lock::Acquire().
  while (!g_initialization_lock.compare_exchange_strong(
      expected, true, std::memory_order_acquire, std::memory_order_acquire)) {
    expected = false;
  }
  root = g_root_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (root) {
    // Semantically equivalent to base::Lock::Release().
    g_initialization_lock.store(false, std::memory_order_release);
    return root;
  }
  // Construct the root in static storage via placement new; it is
  // intentionally never destroyed.
  auto* new_root = new (g_allocator_buffer) base::ThreadSafePartitionRoot({
      base::PartitionOptions::Alignment::kRegular,
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    !BUILDFLAG(ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL)
      base::PartitionOptions::ThreadCache::kEnabled,
#elif BUILDFLAG(ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL)
      // With ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL, if GigaCage is enabled,
      // this partition is only temporary until BackupRefPtr is re-configured
      // at run-time. Leave the ability to have a thread cache to the main
      // partition. (Note that ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL implies
      // that USE_BACKUP_REF_PTR is true.)
      //
      // Note that it is ok to use RefCount::kEnabled below regardless of the
      // GigaCage check, because the constructor will disable ref-count if
      // GigaCage is disabled.
      base::features::IsPartitionAllocGigaCageEnabled()
          ? base::PartitionOptions::ThreadCache::kDisabled
          : base::PartitionOptions::ThreadCache::kEnabled,
#else
      // Other tests, such as the ThreadCache tests create a thread cache, and
      // only one is supported at a time.
      base::PartitionOptions::ThreadCache::kDisabled,
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // !BUILDFLAG(ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL)
      base::PartitionOptions::Quarantine::kAllowed,
      base::PartitionOptions::RefCount::kEnabled,
  });
  g_root_.store(new_root, std::memory_order_release);
  // Semantically equivalent to base::Lock::Release().
  g_initialization_lock.store(false, std::memory_order_release);
  return new_root;
}
// Returns the pre-reconfiguration root, or nullptr if the main root was never
// swapped out by ConfigurePartitionRefCountSupport().
base::ThreadSafePartitionRoot* OriginalAllocator() {
  auto* original = g_original_root_.load(std::memory_order_relaxed);
  return original;
}
// Returns the root used for over-aligned allocations. Depending on build
// flags this is either the main root (when its extras don't perturb
// alignment) or a dedicated partition.
base::ThreadSafePartitionRoot* AlignedAllocator() {
#if !DCHECK_IS_ON() && (!BUILDFLAG(USE_BACKUP_REF_PTR) || \
                        BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION))
  // There are no tags or cookies in front of the allocation, so the regular
  // allocator provides suitably aligned memory already.
  return Allocator();
#else
  // Since the general-purpose allocator uses the thread cache, this one cannot.
  static base::NoDestructor<base::ThreadSafePartitionRoot> aligned_allocator(
      base::PartitionOptions{base::PartitionOptions::Alignment::kAlignedAlloc,
                             base::PartitionOptions::ThreadCache::kDisabled,
                             base::PartitionOptions::Quarantine::kAllowed,
                             base::PartitionOptions::RefCount::kDisabled});
  return aligned_allocator.get();
#endif
}
#if defined(OS_WIN) && defined(ARCH_CPU_X86)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool IsRunning32bitEmulatedOnArm64() {
using IsWow64Process2Function = decltype(&IsWow64Process2);
IsWow64Process2Function is_wow64_process2 =
reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
if (!is_wow64_process2)
return false;
USHORT process_machine;
USHORT native_machine;
bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
&native_machine);
if (!retval)
return false;
if (native_machine == IMAGE_FILE_MACHINE_ARM64)
return true;
return false;
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// The number of bytes to add to every allocation. Ordinarily zero, but set to 8
// when emulating an x86 on ARM64 to avoid a bug in the Windows x86 emulator.
// Written once at startup by ConfigurePartitionAlloc(), read by
// MaybeAdjustSize().
size_t g_extra_bytes;
#endif // defined(OS_WIN) && defined(ARCH_CPU_X86)
// Pads |size| by g_extra_bytes on 32-bit Windows (emulator workaround); a
// no-op elsewhere. CheckAdd() crashes on overflow rather than wrapping.
// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if defined(OS_WIN) && defined(ARCH_CPU_X86)
  return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else  // defined(OS_WIN) && defined(ARCH_CPU_X86)
  return size;
#endif  // defined(OS_WIN) && defined(ARCH_CPU_X86)
}
// Allocates |size| bytes aligned to |alignment|, preferring the main
// partition when its natural alignment already suffices.
void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two. So
  // we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is called
  // with a small alignment. Some may be due to overly-careful code, some are
  // because the client code doesn't know the required alignment at compile
  // time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of
  // mismatch. (see below the default_dispatch definition).
  if (alignment <= base::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never fire.
    PA_CHECK(base::bits::IsPowerOfTwo(alignment));
    return Allocator()->AllocFlagsNoHooks(0, size);
  }
  return AlignedAllocator()->AlignedAllocFlags(base::PartitionAllocNoHooks,
                                               alignment, size);
}
} // namespace
namespace base {
namespace internal {
// malloc() entry point: allocates from the main PartitionAlloc root.
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  const size_t requested_size = MaybeAdjustSize(size);
  return Allocator()->AllocFlagsNoHooks(0, requested_size);
}
// Like PartitionMalloc(), but returns nullptr on failure instead of crashing.
void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  const size_t requested_size = MaybeAdjustSize(size);
  return Allocator()->AllocFlagsNoHooks(base::PartitionAllocReturnNull,
                                        requested_size);
}
// calloc() entry point: zero-filled allocation with a checked n * size
// multiplication (CheckMul crashes on overflow).
void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  const size_t adjusted_size = MaybeAdjustSize(size);
  const size_t total = base::CheckMul(n, adjusted_size).ValueOrDie();
  return Allocator()->AllocFlagsNoHooks(base::PartitionAllocZeroFill, total);
}
// memalign()/posix_memalign() entry point.
void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  void* ptr = AllocateAlignedMemory(alignment, size);
  return ptr;
}
// _aligned_malloc()/aligned_alloc() entry point. Note the (size, alignment)
// parameter order, mirroring the CRT signature.
void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  void* ptr = AllocateAlignedMemory(alignment, size);
  return ptr;
}
// aligned_realloc documentation is
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always free the original memory block and allocates a new memory
// block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocFlags and
// use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  void* new_ptr = nullptr;
  if (size > 0) {
    size = MaybeAdjustSize(size);
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address)
      base::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr)
    return nullptr;
  // TODO(tasak): Need to compare the new alignment with the address' alignment.
  // If the two alignments are not the same, need to return nullptr with EINVAL.
  if (address) {
    // Copy the smaller of (old usable size, new requested size), then release
    // the old block.
    size_t usage = base::ThreadSafePartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);
    base::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  return new_ptr;
}
// realloc() entry point. An empty type name ("") is passed since this is the
// generic malloc partition.
void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  const size_t requested_size = MaybeAdjustSize(size);
  return Allocator()->ReallocFlags(base::PartitionAllocNoHooks, address,
                                   requested_size, "");
}
// free() entry point. FreeNoHooks() recovers the owning root from |address|,
// so it works for allocations from any of the partitions above.
void PartitionFree(const AllocatorDispatch*, void* address, void* context) {
  base::ThreadSafePartitionRoot::FreeNoHooks(address);
}
// malloc_usable_size() entry point.
size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // TODO(lizeb): Returns incorrect values for aligned allocations.
  return base::ThreadSafePartitionRoot::GetUsableSize(address);
}
// Static accessors exposing the file-local roots to the rest of //base (see
// PartitionAllocMalloc in the header).

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}
} // namespace internal
} // namespace base
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
namespace base {
namespace allocator {
// Registers the malloc partitions with the memory reclaimer.
//
// Unlike other partitions, Allocator() and AlignedAllocator() do not register
// their PartitionRoots to the memory reclaimer, because doing so may allocate
// memory. Thus, the registration to the memory reclaimer has to be done
// some time later, when the main root is fully configured.
// TODO(bartekn): Aligned allocator can use the regular initialization path.
void EnablePartitionAllocMemoryReclaimer() {
  auto* reclaimer = PartitionAllocMemoryReclaimer::Instance();
  auto* main_root = Allocator();
  reclaimer->RegisterPartition(main_root);
  if (auto* original_root = OriginalAllocator())
    reclaimer->RegisterPartition(original_root);
  auto* aligned_root = AlignedAllocator();
  if (aligned_root != main_root)
    reclaimer->RegisterPartition(aligned_root);
}
// Applies the lazy-commit feature setting to all malloc partitions.
//
// Unlike other partitions, Allocator() and AlignedAllocator() do not
// configure lazy commit upfront, because it uses base::Feature, which in turn
// allocates memory. Thus, lazy commit configuration has to be done after
// base::FeatureList is initialized.
// TODO(bartekn): Aligned allocator can use the regular initialization path.
void ReconfigurePartitionAllocLazyCommit() {
  Allocator()->ConfigureLazyCommit();
  if (auto* original_root = OriginalAllocator())
    original_root->ConfigureLazyCommit();
  AlignedAllocator()->ConfigureLazyCommit();
}
// Note that ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL implies that
// USE_BACKUP_REF_PTR is true.
#if BUILDFLAG(ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL)
// Static storage for the replacement root built by
// ConfigurePartitionRefCountSupport() (placement new; never destroyed).
alignas(base::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_ref_count_config[sizeof(
        base::ThreadSafePartitionRoot)];

// Swaps in a new main partition with ref-count (BackupRefPtr) support set per
// |enable_ref_count|. The old root keeps serving its live allocations and is
// preserved as OriginalAllocator().
void ConfigurePartitionRefCountSupport(bool enable_ref_count) {
  // If GigaCage is disabled, don't configure a new partition with ref-count
  // enabled, as it'll be ineffective thus wasteful (increased fragmentation).
  // Furthermore, in this case, the main partition has thread cache enabled, so
  // creating one more here simply wouldn't work.
  if (!base::features::IsPartitionAllocGigaCageEnabled())
    return;
  auto* current_root = g_root_.load(std::memory_order_acquire);
  // We expect a number of heap allocations to be made before this function is
  // called, which should force the `g_root` initialization.
  PA_CHECK(current_root);
  current_root->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
                            PartitionPurgeDiscardUnusedSystemPages);
  auto* new_root = new (g_allocator_buffer_for_ref_count_config)
      base::ThreadSafePartitionRoot({
          base::PartitionOptions::Alignment::kRegular,
          base::PartitionOptions::ThreadCache::kEnabled,
          base::PartitionOptions::Quarantine::kAllowed,
          enable_ref_count ? base::PartitionOptions::RefCount::kEnabled
                           : base::PartitionOptions::RefCount::kDisabled,
      });
  g_root_.store(new_root, std::memory_order_release);
  g_original_root_ = current_root;
}
#endif // BUILDFLAG(ENABLE_RUNTIME_BACKUP_REF_PTR_CONTROL)
#if PA_ALLOW_PCSCAN
// Registers the malloc partitions with PCScan (probabilistic conservative
// scanning) so their allocations become scannable.
void EnablePCScan() {
  auto& scanner = internal::PCScan<internal::ThreadSafe>::Instance();
  auto* main_root = Allocator();
  auto* aligned_root = AlignedAllocator();
  scanner.RegisterScannableRoot(main_root);
  if (aligned_root != main_root)
    scanner.RegisterScannableRoot(aligned_root);
}
#endif
#if defined(OS_WIN)
// Call this as soon as possible during startup.
// On x86-on-ARM64 emulation, pads every allocation by 8 bytes to dodge a
// Windows emulator bug (see MaybeAdjustSize and https://crbug.com/1151455).
void ConfigurePartitionAlloc() {
#if defined(ARCH_CPU_X86)
  if (IsRunning32bitEmulatedOnArm64())
    g_extra_bytes = 8;
#endif  // defined(ARCH_CPU_X86)
}
#endif // defined(OS_WIN)
} // namespace allocator
} // namespace base
// Default dispatch table routing malloc-family calls into PartitionAlloc.
// constexpr guarantees compile-time initialization, so allocations that
// happen before dynamic initializers run are already served correctly.
// Note: aligned_free_function reuses PartitionFree, which is safe because
// FreeNoHooks() locates the owning root from the pointer itself.
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &base::internal::PartitionMalloc,  // alloc_function
    &base::internal::PartitionMallocUnchecked,  // alloc_unchecked_function
    &base::internal::PartitionCalloc,  // alloc_zero_initialized_function
    &base::internal::PartitionMemalign,  // alloc_aligned_function
    &base::internal::PartitionRealloc,  // realloc_function
    &base::internal::PartitionFree,  // free_function
    &base::internal::PartitionGetSizeEstimate,  // get_size_estimate_function
    nullptr,  // batch_malloc_function
    nullptr,  // batch_free_function
    nullptr,  // free_definite_size_function
    &base::internal::PartitionAlignedAlloc,  // aligned_malloc_function
    &base::internal::PartitionAlignedRealloc,  // aligned_realloc_function
    &base::internal::PartitionFree,  // aligned_free_function
    nullptr,  // next
};
// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that doable.
extern "C" {
#if !defined(OS_APPLE)
// No-op stub: PartitionAlloc has no malloc_stats() equivalent.
SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
// No-op stub: mallopt() knobs do not apply to PartitionAlloc; 0 = failure.
SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}
#endif // !defined(OS_APPLE)
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
// Fills a glibc-style mallinfo from PartitionAlloc's own statistics.
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  base::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
  base::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }
  struct mallinfo info = {0};
  info.arena = 0;  // Memory *not* allocated with mmap().
  // Memory allocated with mmap(), aka virtual size.
  info.hblks = allocator_dumper.stats().total_mmapped_bytes +
               aligned_allocator_dumper.stats().total_mmapped_bytes;
  // Resident bytes.
  info.hblkhd = allocator_dumper.stats().total_resident_bytes +
                aligned_allocator_dumper.stats().total_resident_bytes;
  // Allocated bytes.
  info.uordblks = allocator_dumper.stats().total_active_bytes +
                  aligned_allocator_dumper.stats().total_active_bytes;
  return info;
}
#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
} // extern "C"
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@ -0,0 +1,73 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#include "base/allocator/allocator_shim.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/base_export.h"
namespace base {
namespace internal {
// Accessors for the PartitionRoots backing the malloc shim (defined in the
// corresponding .cc). All returned pointers have process lifetime.
class BASE_EXPORT PartitionAllocMalloc {
 public:
  // The main (general-purpose) malloc partition.
  static ThreadSafePartitionRoot* Allocator();
  // May return |nullptr|, will never return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* OriginalAllocator();
  // May return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* AlignedAllocator();
};
// AllocatorDispatch-compatible entry points implemented on PartitionAlloc.
// Exported so other allocator shims (e.g. the sampling profiler) can chain to
// them directly.
BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
                                  size_t size,
                                  void* context);
// Returns nullptr on allocation failure instead of terminating.
BASE_EXPORT void* PartitionMallocUnchecked(
    const base::allocator::AllocatorDispatch*,
    size_t size,
    void* context);
BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
                                  size_t n,
                                  size_t size,
                                  void* context);
BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
                                    size_t alignment,
                                    size_t size,
                                    void* context);
// Note: (size, alignment) order, matching the CRT's _aligned_malloc.
BASE_EXPORT void* PartitionAlignedAlloc(
    const base::allocator::AllocatorDispatch* dispatch,
    size_t size,
    size_t alignment,
    void* context);
BASE_EXPORT void* PartitionAlignedRealloc(
    const base::allocator::AllocatorDispatch* dispatch,
    void* address,
    size_t size,
    size_t alignment,
    void* context);
BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
                                   void* address,
                                   size_t size,
                                   void* context);
BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
                               void* address,
                               void* context);
BASE_EXPORT size_t
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
                         void* address,
                         void* context);
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_

View File

@ -0,0 +1,89 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include "base/allocator/allocator_shim_internals.h"
#include "third_party/tcmalloc/chromium/src/config.h"
#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
namespace {
using base::allocator::AllocatorDispatch;
// Forwards malloc() to tcmalloc.
void* TCMalloc(const AllocatorDispatch*, size_t size, void* context) {
  void* ptr = tc_malloc(size);
  return ptr;
}
// Forwards the failure-tolerant malloc() variant to tcmalloc (skips the
// new-handler retry loop and returns nullptr on failure).
void* TCMallocUnchecked(const AllocatorDispatch*, size_t size, void* context) {
  void* ptr = tc_malloc_skip_new_handler(size);
  return ptr;
}
// Forwards calloc() to tcmalloc.
void* TCCalloc(const AllocatorDispatch*, size_t n, size_t size, void* context) {
  void* zeroed = tc_calloc(n, size);
  return zeroed;
}
// Forwards memalign() to tcmalloc.
void* TCMemalign(const AllocatorDispatch*,
                 size_t alignment,
                 size_t size,
                 void* context) {
  void* aligned = tc_memalign(alignment, size);
  return aligned;
}
// Forwards realloc() to tcmalloc.
void* TCRealloc(const AllocatorDispatch*,
                void* address,
                size_t size,
                void* context) {
  void* resized = tc_realloc(address, size);
  return resized;
}
// Forwards free() to tcmalloc.
void TCFree(const AllocatorDispatch*, void* address, void* context) {
  tc_free(address);
}
// Returns tcmalloc's usable-size estimate for |address|.
size_t TCGetSizeEstimate(const AllocatorDispatch*,
                         void* address,
                         void* context) {
  const size_t usable = tc_malloc_size(address);
  return usable;
}
} // namespace
// Default dispatch table routing every implemented hook to tcmalloc's tc_*
// entry points. Entries left nullptr fall back to the shim's emulation.
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &TCMalloc, /* alloc_function */
    &TCMallocUnchecked, /* alloc_unchecked_function */
    &TCCalloc, /* alloc_zero_initialized_function */
    &TCMemalign, /* alloc_aligned_function */
    &TCRealloc, /* realloc_function */
    &TCFree, /* free_function */
    &TCGetSizeEstimate, /* get_size_estimate_function */
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    nullptr, /* aligned_malloc_function */
    nullptr, /* aligned_realloc_function */
    nullptr, /* aligned_free_function */
    nullptr, /* next */
};
// In the case of tcmalloc we have also to route the diagnostic symbols,
// which are not part of the unified shim layer, to tcmalloc for consistency.
extern "C" {
// Diagnostic entry points must also be routed to tcmalloc so they report on
// the heap that is actually in use.
SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {
  return tc_malloc_stats();
}
SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return tc_mallopt(cmd, value);
}
// Only defined when tcmalloc's configure detected struct mallinfo.
#ifdef HAVE_STRUCT_MALLINFO
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  return tc_mallinfo();
}
#endif
} // extern "C"

View File

@ -0,0 +1,106 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_shim.h"
#include <ostream>
#include "base/allocator/winheap_stubs_win.h"
#include "base/check.h"
namespace {
using base::allocator::AllocatorDispatch;
// Forwards malloc() to the default Windows process heap.
void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  void* ptr = base::allocator::WinHeapMalloc(size);
  return ptr;
}
// calloc() on top of the Windows heap: allocates n * elem_size bytes and
// zero-fills them; returns nullptr if the product overflows size_t.
void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
                               size_t n,
                               size_t elem_size,
                               void* context) {
  const size_t size = n * elem_size;
  // Unsigned multiplication wraps on overflow, so verify by division.
  const bool overflowed = elem_size != 0 && size / elem_size != n;
  if (overflowed)
    return nullptr;
  void* result = DefaultWinHeapMallocImpl(self, size, context);
  if (result)
    memset(result, 0, size);
  return result;
}
// memalign() has no Windows-heap equivalent; any call is a hard programming
// error (use the aligned_* hooks instead).
void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
                                 size_t alignment,
                                 size_t size,
                                 void* context) {
  CHECK(false) << "The windows heap does not support memalign.";
  return nullptr;
}
// Forwards realloc() to the Windows heap.
void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                void* context) {
  void* resized = base::allocator::WinHeapRealloc(address, size);
  return resized;
}
// Forwards free() to the Windows heap.
void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  base::allocator::WinHeapFree(address);
}
// Returns the Windows heap's size estimate for |address|.
size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
                                         void* address,
                                         void* context) {
  const size_t estimate = base::allocator::WinHeapGetSizeEstimate(address);
  return estimate;
}
// Forwards _aligned_malloc() to the Windows heap helpers.
void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
                                      size_t size,
                                      size_t alignment,
                                      void* context) {
  void* aligned = base::allocator::WinHeapAlignedMalloc(size, alignment);
  return aligned;
}
// Forwards _aligned_realloc() to the Windows heap helpers.
void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
                                       void* ptr,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  void* resized = base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
  return resized;
}
// Forwards _aligned_free() to the Windows heap helpers.
void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
                                   void* ptr,
                                   void* context) {
  base::allocator::WinHeapAlignedFree(ptr);
}
} // namespace
// Guarantee that default_dispatch is compile-time initialized to avoid using
// it before initialization (allocations before main in release builds with
// optimizations disabled).
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &DefaultWinHeapMallocImpl, /* alloc_function */
    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
    &DefaultWinHeapCallocImpl, /* alloc_zero_initialized_function */
    &DefaultWinHeapMemalignImpl, /* alloc_aligned_function */
    &DefaultWinHeapReallocImpl, /* realloc_function */
    &DefaultWinHeapFreeImpl, /* free_function */
    &DefaultWinHeapGetSizeEstimateImpl, /* get_size_estimate_function */
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    &DefaultWinHeapAlignedMallocImpl, /* aligned_malloc_function */
    &DefaultWinHeapAlignedReallocImpl, /* aligned_realloc_function */
    &DefaultWinHeapAlignedFreeImpl, /* aligned_free_function */
    nullptr, /* next */
};

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#include "build/build_config.h"
#if defined(__GNUC__)

#include <sys/cdefs.h>  // for __THROW

// __THROW marks the C allocator entry points as non-throwing, matching
// glibc's declarations; define a fallback on non-glibc toolchains.
#ifndef __THROW  // Not a glibc system
#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
#define __THROW _NOEXCEPT
#else
#define __THROW
#endif  // !_NOEXCEPT
#endif

// Shim layer symbols need to be ALWAYS exported, regardless of component build.
//
// If an exported symbol is linked into a DSO, it may be preempted by a
// definition in the main executable. If this happens to an allocator symbol, it
// will mean that the DSO will use the main executable's allocator. This is
// normally relatively harmless -- regular allocations should all use the same
// allocator, but if the DSO tries to hook the allocator it will not see any
// allocations.
//
// However, if LLVM LTO is enabled, the compiler may inline the shim layer
// symbols into callers. The end result is that allocator calls in DSOs may use
// either the main executable's allocator or the DSO's allocator, depending on
// whether the call was inlined. This is arguably a bug in LLVM caused by its
// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
// To work around the bug we use noinline to prevent the symbols from being
// inlined.
//
// In the long run we probably want to avoid linking the allocator bits into
// DSOs altogether. This will save a little space and stop giving DSOs the false
// impression that they can hook the allocator.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))

#elif defined(OS_WIN)  // __GNUC__

#define __THROW
#define SHIM_ALWAYS_EXPORT __declspec(noinline)

#endif  // __GNUC__
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_

View File

@ -0,0 +1,152 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
// Preempt the default new/delete C++ symbols so they call the shim entry
// points. This file is strongly inspired by tcmalloc's
// libc_override_redefine.h.
#include <new>
#include "base/allocator/allocator_shim_internals.h"
#include "build/build_config.h"
// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway to prevent a possible situation where a library gets loaded
// in that uses the aligned operators. We want to avoid a situation where
// separate heaps are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if defined(OS_APPLE) || defined(OS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif
// Overrides of the scalar and array forms of operator new/delete (throwing,
// nothrow, and sized-delete variants). Each simply routes into the shim
// layer; the exact signatures are the ABI contract and must not change.
SHIM_ALWAYS_EXPORT void* operator new(size_t size) {
  return ShimCppNew(size);
}

SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size) {
  return ShimCppNew(size);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW {
  ShimCppDelete(p);
}

// Nothrow variants: return nullptr on failure instead of throwing.
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p,
                                          const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

// Sized-delete variants (C++14); the size hint is deliberately ignored.
SHIM_ALWAYS_EXPORT void operator delete(void* p, size_t) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p, size_t) __THROW {
  ShimCppDelete(p);
}
// Aligned operator new/delete overrides. With __cpp_aligned_new these are
// the standard std::align_val_t overloads; otherwise the ALIGN_* macros
// expand to the Itanium-mangled symbol names with C linkage (see the #else
// branch above). All aligned deletes funnel into ShimCppDelete, which must
// therefore accept pointers from aligned allocations.
ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW(std::size_t size,
                                                 ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL(void* p, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_SIZED(void* p,
                                                      std::size_t size,
                                                      ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_ARR(std::size_t size,
                                                     ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_ARR(void* p,
                                                    ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_ARR_SIZED(void* p,
                                                          std::size_t size,
                                                          ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

View File

@ -0,0 +1,119 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
// Alias the internal Glibc symbols to the shim entry points.
// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
// Effectively this file does two things:
// 1) Re-define the __malloc_hook & co symbols. Those symbols are defined as
// weak in glibc and are meant to be defined strongly by client processes
// to hook calls initiated from within glibc.
// 2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
// is that in the past (in RedHat 9) we had instances of libraries that were
// allocating via malloc() and freeing using __libc_free().
// See tcmalloc's libc_override_glibc.h for more context.
#include <features.h> // for __GLIBC__
#include <malloc.h>
#include <unistd.h>
#include <new>
#include "base/allocator/allocator_shim_internals.h"
// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
#if !defined(__MALLOC_HOOK_VOLATILE)
#define MALLOC_HOOK_MAYBE_VOLATILE /**/
#else
#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
#endif
extern "C" {
// 1) Re-define malloc_hook weak symbols.
namespace {

// Trampolines installed into glibc's __*_hook pointers; each forwards into
// the shim with a null context. The |caller| return address is ignored.
void* GlibcMallocHook(size_t size, const void* caller) {
  return ShimMalloc(size, nullptr);
}

void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
  return ShimRealloc(ptr, size, nullptr);
}

void GlibcFreeHook(void* ptr, const void* caller) {
  return ShimFree(ptr, nullptr);
}

void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
  return ShimMemalign(align, size, nullptr);
}

}  // namespace

// Strong definitions that replace glibc's weak hook symbols, so allocations
// initiated from within glibc itself are routed through the shim (see the
// file header).
__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
                                               const void*) = &GlibcMallocHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
    &GlibcReallocHook;

__attribute__((visibility("default"))) void (
    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
                                             const void*) = &GlibcFreeHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
    &GlibcMemalignHook;
// 2) Redefine libc symbols themselves.
// Redefinitions of glibc's internal aliases (__libc_malloc & co.), so code
// that calls these symbols directly also goes through the shim (see the
// file header for the historical motivation).
SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
  return ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
  return ShimPosixMemalign(r, a, s);
}
} // extern "C"
// Safety check.
#if !defined(__GLIBC__)
#error The target platform does not seem to use Glibc. Disable the allocator \
shim by setting use_allocator_shim=false in GN args.
#endif

View File

@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Its purpose is to preempt the Libc symbols for malloc/new so they call the
// shim layer entry points.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#if defined(OS_APPLE)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
#include "base/allocator/allocator_shim_internals.h"
extern "C" {
// Overrides of the public libc allocation symbols; each forwards into the
// shim with a null zone/context.
SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

// NOTE(review): aligned_alloc is routed to the same shim entry as memalign;
// the C11 requirement that |s| be a multiple of |align| is presumably not
// enforced here — confirm against ShimMemalign.
SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
  return ShimPosixMemalign(r, a, s);
}

// Both the macOS-style and glibc-style size queries map to the same shim
// estimate.
SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}

SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}
// The default dispatch translation unit has to define also the following
// symbols (unless they are ultimately routed to the system symbols):
// void malloc_stats(void);
// int mallopt(int, int);
// struct mallinfo mallinfo(void);
} // extern "C"

View File

@ -0,0 +1,147 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
// This header overrides the __wrap_X symbols when using the link-time
// -Wl,-wrap,malloc shim-layer approach (see README.md).
// All references to malloc, free, etc. within the linker unit that gets the
// -wrap linker flags (e.g., libchrome.so) will be rewritten to the
// linker as references to __wrap_malloc, __wrap_free, which are defined here.
#include <algorithm>
#include <cstdarg>
#include <cstring>

#include "base/allocator/allocator_shim_internals.h"
extern "C" {
// __wrap_* definitions targeted by -Wl,-wrap for the core allocator entry
// points; each forwards into the shim with a null context.
SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
  return ShimMemalign(align, size, nullptr);
}

SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
                                             size_t align,
                                             size_t size) {
  return ShimPosixMemalign(res, align, size);
}

SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
  return ShimRealloc(address, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}
const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");
extern char* __wrap_strdup(const char* str);
// Override <stdlib.h>
extern char* __real_realpath(const char* path, char* resolved_path);
// Overrides realpath(). If the caller supplies |resolved_path| the real
// implementation writes into it directly. Otherwise the real realpath()
// would allocate the result with the unwrapped allocator, so resolve into a
// stack buffer (kPathMaxSize >= PATH_MAX, asserted above) and duplicate it
// via __wrap_strdup() so the caller can release it through the shimmed
// free().
SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
                                         char* resolved_path) {
  if (resolved_path)
    return __real_realpath(path, resolved_path);

  char buffer[kPathMaxSize];
  if (!__real_realpath(path, buffer))
    return nullptr;
  return __wrap_strdup(buffer);
}
// Override <string.h> functions
// Duplicates |str| (including the terminating NUL) into shim-allocated
// memory. Returns nullptr if the allocation fails.
SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
  const std::size_t bytes_to_copy = std::strlen(str) + 1;
  char* copy = reinterpret_cast<char*>(ShimMalloc(bytes_to_copy, nullptr));
  if (copy)
    std::memcpy(copy, str, bytes_to_copy);
  return copy;
}
// Duplicates at most |n| characters of |str| into shim-allocated memory,
// always NUL-terminating the result. Returns nullptr on allocation failure.
//
// Per POSIX, strndup() must not read past the first |n| bytes of |str|: the
// input need not be NUL-terminated when it holds n or more characters.
// Using std::strlen() here could read out of bounds, so the length is
// computed with std::memchr(), whose scan is bounded by |n|.
SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
  const void* terminator = std::memchr(str, '\0', n);
  std::size_t length =
      terminator
          ? static_cast<std::size_t>(static_cast<const char*>(terminator) -
                                     str)
          : n;
  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
  if (!buffer)
    return nullptr;
  std::memcpy(buffer, str, length);
  buffer[length] = '\0';
  return buffer;
}
// Override <unistd.h>
extern char* __real_getcwd(char* buffer, size_t size);
// Overrides getcwd(). With a caller-provided |buffer| the real
// implementation writes into it directly. With a null |buffer| the real
// getcwd() would allocate with the unwrapped allocator, so fetch the path
// into a stack buffer and duplicate it via __wrap_strdup() so it can be
// freed through the shimmed free().
// NOTE(review): |size| is caller-controlled and sizes a variable-length
// stack array (a GCC/Clang extension, not standard C++); a very large
// |size| would overflow the stack — presumably callers pass modest sizes.
// TODO: confirm.
SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
  if (buffer)
    return __real_getcwd(buffer, size);

  if (!size)
    size = kPathMaxSize;

  char local_buffer[size];
  if (!__real_getcwd(local_buffer, size))
    return nullptr;
  return __wrap_strdup(local_buffer);
}
// Override stdio.h
// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
// Android, and used by libc++.
// Overrides vasprintf() (non-standard, _GNU_SOURCE; implemented by Bionic
// and used by libc++) so the output buffer is shim-allocated.
//
// Strategy: format into a 128-byte guess; if the output was truncated,
// grow the buffer to the exact size and format again. A va_list is
// indeterminate after it has been consumed once, so the first pass runs on
// a va_copy and the (still pristine) original is used for the second pass.
// The previous code reused |va_args| for both passes, which is undefined
// behavior on ABIs where va_list is stateful.
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
                                        const char* fmt,
                                        va_list va_args) {
  constexpr int kInitialSize = 128;
  *strp = static_cast<char*>(
      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.

  va_list va_args_copy;
  va_copy(va_args_copy, va_args);
  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args_copy);
  va_end(va_args_copy);

  if (actual_size < 0) {
    // Formatting failed. Mirror vasprintf()'s contract: return the negative
    // value and leave no allocation behind (|*strp| is undefined on error).
    free(*strp);
    *strp = nullptr;
    return actual_size;
  }

  // Now we know the size. This is not very efficient, but we cannot really
  // do better without accessing internal libc functions, or reimplementing
  // *printf().
  //
  // This is very lightly used in Chromium in practice, see crbug.com/116558
  // for details.
  *strp = static_cast<char*>(realloc(*strp, actual_size + 1));
  if (actual_size >= kInitialSize)
    return vsnprintf(*strp, actual_size + 1, fmt, va_args);
  return actual_size;
}
// Overrides asprintf() by delegating to vasprintf(). Within this linker
// unit the vasprintf reference is presumably rewritten to __wrap_vasprintf
// by the -Wl,-wrap flags (see the file header), so the allocation goes
// through the shim.
SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
  va_list va_args;
  va_start(va_args, fmt);
  int retval = vasprintf(strp, fmt, va_args);
  va_end(va_args);
  return retval;
}
} // extern "C"

View File

@ -0,0 +1,60 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
// Builds the table of shim entry points that replaces a macOS malloc zone's
// own functions. Each captureless lambda converts to the required C
// function pointer and forwards into the shim, passing the zone itself as
// the dispatch context. Fields not set here remain null from the memset.
MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
  MallocZoneFunctions new_functions;
  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return ShimGetSizeEstimate(ptr, zone);
  };
  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimMalloc(size, zone);
  };
  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
                            size_t size) -> void* {
    return ShimCalloc(n, size, zone);
  };
  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimValloc(size, zone);
  };
  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
    ShimFree(ptr, zone);
  };
  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
                             size_t size) -> void* {
    return ShimRealloc(ptr, size, zone);
  };
  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
                                  void** results,
                                  unsigned num_requested) -> unsigned {
    return ShimBatchMalloc(size, results, num_requested, zone);
  };
  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
                                unsigned num_to_be_freed) -> void {
    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
  };
  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
                              size_t size) -> void* {
    return ShimMemalign(alignment, size, zone);
  };
  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                        size_t size) {
    ShimFreeDefiniteSize(ptr, size, zone);
  };
  return new_functions;
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,178 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This header defines symbols to override the same functions in the Visual C++
// CRT implementation.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#include <malloc.h>
#include <windows.h>
#include "base/allocator/allocator_shim_internals.h"
// Even though most C++ allocation operators can be left alone since the
// interception works at a lower level, these ones should be
// overridden. Otherwise they redirect to malloc(), which is configured to crash
// with an OOM in failure cases, such as allocation requests that are too large.
// Nothrow operator new overrides for Windows: routed through the shim so an
// oversized request returns nullptr rather than reaching the CRT malloc(),
// which is configured to crash with an OOM on failure (see the comment at
// the top of this file).
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}
extern "C" {
void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
namespace {
int win_new_mode = 0;
} // namespace
// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
//
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
int _set_new_mode(int flag) {
  // The MS CRT calls this function early on in startup, so this serves as a
  // low overhead proof that the allocator shim is in place for this process.
  base::allocator::g_is_win_shim_layer_initialized = true;
  int old_mode = win_new_mode;
  win_new_mode = flag;

  // Propagate the mode into the shim: mode 1 means malloc failures invoke
  // the new handler, like operator new would.
  base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
  return old_mode;
}

// Replaces _query_new_mode in ucrt\heap\new_mode.cpp. Returns the mode most
// recently set via _set_new_mode().
int _query_new_mode() {
  return win_new_mode;
}
// These symbols override the CRT's implementation of the same functions,
// forwarding every call into the shim with a null context.
__declspec(restrict) void* malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

void free(void* ptr) {
  ShimFree(ptr, nullptr);
}

__declspec(restrict) void* realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

__declspec(restrict) void* calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

// _msize() is the Windows equivalent of malloc_size().
size_t _msize(void* memblock) {
  return ShimGetSizeEstimate(memblock, nullptr);
}

__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
  return ShimAlignedMalloc(size, alignment, nullptr);
}

__declspec(restrict) void* _aligned_realloc(void* address,
                                            size_t size,
                                            size_t alignment) {
  return ShimAlignedRealloc(address, size, alignment, nullptr);
}

void _aligned_free(void* address) {
  ShimAlignedFree(address, nullptr);
}
}
// _recalloc_base is called by CRT internally. Reallocates |block| to hold
// |count| * |size| bytes (overflow-checked; ValueOrDie() crashes on
// overflow) and zero-fills any newly grown tail beyond the old block size.
__declspec(restrict) void* _recalloc_base(void* block,
                                          size_t count,
                                          size_t size) {
  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
  base::CheckedNumeric<size_t> new_block_size_checked = count;
  new_block_size_checked *= size;
  const size_t new_block_size = new_block_size_checked.ValueOrDie();

  void* const new_block = realloc(block, new_block_size);

  if (new_block != nullptr && old_block_size < new_block_size) {
    memset(static_cast<char*>(new_block) + old_block_size, 0,
           new_block_size - old_block_size);
  }

  return new_block;
}

// The *_base variants are the CRT-internal entry points; forward them to
// the (shimmed) public symbols above.
__declspec(restrict) void* _malloc_base(size_t size) {
  return malloc(size);
}

__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
  return calloc(n, size);
}

void _free_base(void* block) {
  free(block);
}

__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
  return _recalloc_base(block, count, size);
}
// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added
// if uses are introduced. Each CHECK(false) is paired with
// __builtin_unreachable() to satisfy the compiler about the missing return.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
} // extern "C"
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_

View File

@ -0,0 +1,23 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Workaround for crosbug:629593. Using AFDO on the tcmalloc files is
// causing problems. The tcmalloc files depend on stack layouts and
// AFDO can mess with them. Better not to use AFDO there. This is a
// temporary hack. We will add a mechanism in the build system to
// avoid using -fauto-profile for tcmalloc files.
#include "build/chromeos_buildflags.h"
#if !defined(__clang__) && \
(BUILDFLAG(IS_CHROMEOS_ASH) || (__GNUC__ > 5 && __GNUC__ < 7))
// Note that this option only seems to be available in the chromeos GCC 4.9
// toolchain, and stock GCC 5 up to 7.
#pragma GCC optimize ("no-auto-profile")
#endif
#if defined(TCMALLOC_FOR_DEBUGALLOCATION)
#include "third_party/tcmalloc/chromium/src/debugallocation.cc"
#else
#include "third_party/tcmalloc/chromium/src/tcmalloc.cc"
#endif

View File

@ -0,0 +1,119 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/malloc_zone_functions_mac.h"
#include <atomic>
#include "base/synchronization/lock.h"
namespace base {
namespace allocator {
MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
"MallocZoneFunctions must be POD");
// Snapshots |zone|'s entry points into |functions| before the zone gets
// shimmed, so calls can later be dispatched back to the original
// implementation. The six core functions must be present (CHECKed); the
// remaining ones are optional and version-gated.
void StoreZoneFunctions(const ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  memset(functions, 0, sizeof(MallocZoneFunctions));
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.
    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }

  // Note that zone version 8 introduced a pressure relief callback, and
  // version 10 introduced a claimed address callback, but neither are
  // allocation or deallocation callbacks and so aren't important to
  // intercept.

  // |context| is the identity key used to find this snapshot again (see
  // GetFunctionsForZone()).
  functions->context = zone;
}
namespace {
// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
// Lazily-created (and intentionally leaked) lock serializing all
// modifications to g_malloc_zones/g_zone_count. Dispatch-path reads do not
// take this lock (see the comment above).
base::Lock& GetLock() {
  static base::Lock* g_lock = new base::Lock;
  return *g_lock;
}

// Debug check that the caller holds GetLock().
void EnsureMallocZonesInitializedLocked() {
  GetLock().AssertAcquired();
}

// Number of g_malloc_zones entries currently in use. Written only under
// GetLock().
int g_zone_count = 0;
// Returns whether |zone| has already been captured into g_malloc_zones.
// The caller must hold GetLock().
bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
  EnsureMallocZonesInitializedLocked();
  GetLock().AssertAcquired();
  const void* needle = reinterpret_cast<void*>(zone);
  int index = 0;
  while (index < g_zone_count) {
    if (g_malloc_zones[index].context == needle)
      return true;
    ++index;
  }
  return false;
}
} // namespace
// Captures |zone|'s unshimmed entry points into g_malloc_zones so dispatch
// can later be routed back to the original zone. Returns false when the
// zone is already stored or the fixed-size array is full.
bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;

  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code
  // that reads these values is triggered after this function returns, so we
  // want to guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}
// Thread-safe wrapper around IsMallocZoneAlreadyStoredLocked().
bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}

// A stored zone needs replacing when its current malloc entry no longer
// matches the shim's |functions->malloc|.
bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}

int GetMallocZoneCountForTesting() {
  base::AutoLock l(GetLock());
  return g_zone_count;
}

// Test-only: forgets every stored zone and zeroes the backing array.
void ClearAllMallocZonesForTesting() {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,103 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#include <malloc/malloc.h>
#include <stddef.h>
#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
size_t num_items,
size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
void* ptr,
size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
size_t alignment,
size_t size);
typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
size_t size,
void** results,
unsigned num_requested);
typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
void** to_be_freed,
unsigned num_to_be_freed);
typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
void* ptr,
size_t size);
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
// Snapshot of one malloc zone's entry points, taken before the zone is
// shimmed (see StoreZoneFunctions()). |context| is the original zone
// pointer and serves as the lookup key in GetFunctionsForZone().
struct MallocZoneFunctions {
  malloc_type malloc;
  calloc_type calloc;
  valloc_type valloc;
  free_type free;
  realloc_type realloc;
  // The fields below may be null; availability depends on the zone's
  // version and capabilities (see StoreZoneFunctions()).
  memalign_type memalign;
  batch_malloc_type batch_malloc;
  batch_free_type batch_free;
  free_definite_size_type free_definite_size;
  size_fn_type size;
  const ChromeMallocZone* context;
};
BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch back
// into the zone, and additional zones may be added later in the execution of
// the program, so the array needs to be both thread-safe and high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
// 1. Fill in all the fields.
// 2. Update the total zone count.
// 3. Insert a memory barrier.
// 4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
ChromeMallocZone* zone,
const MallocZoneFunctions* functions);
BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();
// Returns the stored (unshimmed) functions whose |context| matches |zone|.
// Scans the whole fixed-size array; unused slots have a null context and
// never match. Crashes the process if |zone| was never stored, since there
// is no original implementation to dispatch back to.
inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
    if (g_malloc_zones[i].context == zone)
      return g_malloc_zones[i];
  }
  IMMEDIATE_CRASH();
}
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_

View File

@ -0,0 +1,6 @@
monorail {
component: "Blink>MemoryAllocator>Partition"
}
# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"

View File

@ -0,0 +1,11 @@
ajwong@chromium.org
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org
palmer@chromium.org
# For PCScan specific changes:
per-file pcscan*=bikineev@chromium.org
per-file pcscan*=mlippautz@chromium.org
per-file object_bitmap*=bikineev@chromium.org
per-file object_bitmap*=mlippautz@chromium.org

View File

@ -0,0 +1,113 @@
# PartitionAlloc Design
This document describes PartitionAlloc at a high level. For documentation about
its implementation, see the comments in `partition_alloc.h`.
[TOC]
## Overview
PartitionAlloc is a memory allocator optimized for security, low allocation
latency (when called appropriately), and good space efficiency (when called
appropriately). This document aims to help you understand how PartitionAlloc
works so that you can use it effectively.
## Partitions And Buckets
A *partition* is a heap that contains certain object types, objects of certain
sizes, or objects of a certain lifetime (as the caller prefers). Callers can
create as many partitions as they need. Each partition is separate and protected
from any other partitions.
Each partition holds multiple buckets. A *bucket* is a region in a partition
that contains similar-sized objects.
PartitionAlloc aligns each object allocation with the closest bucket size. For
example, if a partition has 3 buckets for 64 bytes, 256 bytes, and 1024 bytes,
then PartitionAlloc will satisfy an allocation request for 128 bytes by rounding
it up to 256 bytes and allocating from the second bucket.
## Performance
The current implementation is optimized for the main thread use-case. For
example, PartitionAlloc doesn't have threaded caches.
PartitionAlloc is designed to be extremely fast in its fast paths. The fast
paths of allocation and deallocation require just 2 (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.
For an example of how to use partitions to get good performance and good safety,
see Blink's usage, as described in `wtf/allocator/Allocator.md`.
Large allocations (> kMaxBucketed == 960KB) are realized by direct
memory mmapping. This size makes sense because 960KB = 0xF0000. The next larger
bucket size is 1MB = 0x100000, which is greater than half the available space in
a SuperPage, meaning it would not be possible to pack even 2 sequential
allocations into a single SuperPage.
`PartitionRoot<internal::ThreadSafe>::Alloc()` acquires a lock for thread
safety. (The current implementation uses a spin lock on the assumption that
thread contention will be rare in its callers. The original caller was Blink,
where this is generally true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a
`PartitionRoot<internal::NotThreadSafe>::Alloc()` or otherwise using
`PartitionAlloc<internal::NotThreadSafe>`. Callers can also arrange for low
contention, such as by using a dedicated partition for single-threaded,
latency-critical allocations.
Because PartitionAlloc guarantees that address space regions used for one
partition are never reused for other partitions, partitions can eat a large
amount of virtual address space (even if not of actual memory).
Mixing various random objects in the same partition will generally lead to lower
efficiency. For good performance, group similar objects into the same partition.
## Security
Security is one of the most important goals of PartitionAlloc.
PartitionAlloc guarantees that different partitions exist in different regions
of the process' address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.
PartitionAlloc also guarantees that:
* Linear overflows cannot corrupt into the partition. (There is a guard page at
the beginning of each partition.)
* Linear overflows cannot corrupt out of the partition. (There is a guard page
at the end of each partition.)
* Linear overflow or underflow cannot corrupt the allocation metadata.
PartitionAlloc records metadata in a dedicated region out-of-line (not adjacent
to objects).
* Objects of different sizes will likely be allocated in different buckets, and
hence at different addresses. One page can contain only similar-sized objects.
* Dereference of a freelist pointer should fault.
* Partial pointer overwrite of freelist pointer should fault.
* Large allocations have guard pages at the beginning and end.
## Alignment
PartitionAlloc doesn't have explicit support for a `posix_memalign()` type call,
however it provides some guarantees on the alignment of returned pointers.
All pointers are aligned on the smallest allocation granularity, namely
`sizeof(void*)`. Additionally, for power-of-two sized allocations, the behavior
depends on the compilation flags:
* With `DCHECK_IS_ON()`, returned pointers are never guaranteed to be aligned on
more than 16 bytes.
* Otherwise, the returned pointer is guaranteed to be aligned on
`min(allocation_size, system page size)`.
See the tests in `partition_alloc_unittest.cc` for more details.

View File

@ -0,0 +1,390 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h"
#if defined(OS_APPLE)
#include <sys/mman.h>
#endif
#include <algorithm>
#include <limits>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/lazy_instance.h"
#include "base/notreached.h"
#include "base/stl_util.h"
namespace base {
namespace internal {
namespace {
base::LazyInstance<AddressPoolManager>::Leaky g_address_pool_manager =
LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
// Returns the process-wide singleton. Backed by a leaky LazyInstance, so it
// is created on first use and never destroyed at shutdown.
AddressPoolManager* AddressPoolManager::GetInstance() {
  return g_address_pool_manager.Pointer();
}
#if defined(PA_HAS_64_BITS_POINTERS)
namespace {
// Releases the physical memory backing [address, address + size) while the
// address range itself stays reserved (it is remapped PROT_NONE on macOS).
// This will crash if the range cannot be decommitted.
void DecommitPages(void* address, size_t size) {
#if defined(OS_APPLE)
  // MAP_FIXED replaces an existing mapping with a new one, when the address is
  // already part of a mapping. Since newly-created mappings are guaranteed to
  // be zero-filled, this has the desired effect. It is only required on macOS,
  // as on other operating systems, |DecommitSystemPages()| provides the same
  // behavior.
  void* ptr = mmap(address, size, PROT_NONE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  PA_CHECK(ptr == address);
#else
  DecommitSystemPages(address, size, PageUpdatePermissions);
#endif
}
} // namespace
constexpr size_t AddressPoolManager::Pool::kMaxBits;
// Registers the reserved, super-page-aligned range [ptr, ptr + length) with
// the first free pool slot and returns that slot's 1-based handle. Hits
// NOTREACHED() when every slot is already occupied.
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
  pool_handle candidate = 0;
  for (Pool& pool : pools_) {
    ++candidate;
    if (pool.IsInitialized())
      continue;
    pool.Initialize(ptr, length);
    return candidate;
  }
  NOTREACHED();
  return 0;
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < base::size(pools_); ++i)
pools_[i].Reset();
}
// Unregisters the (currently initialized) pool named by |handle|; its slot
// becomes available to a subsequent Add().
void AddressPoolManager::Remove(pool_handle handle) {
  Pool* const target = GetPool(handle);
  PA_DCHECK(target->IsInitialized());
  target->Reset();
}
// Carves |length| bytes out of the pool behind |handle|. When
// |requested_address| is non-null, that exact range is tried first; on
// failure (or when no address was requested) a first-fit search is used.
char* AddressPoolManager::Reserve(pool_handle handle,
                                  void* requested_address,
                                  size_t length) {
  Pool* pool = GetPool(handle);
  if (requested_address &&
      pool->TryReserveChunk(reinterpret_cast<uintptr_t>(requested_address),
                            length)) {
    return static_cast<char*>(requested_address);
  }
  return reinterpret_cast<char*>(pool->FindChunk(length));
}
// Gives the range [ptr, ptr + length) back to the pool behind |handle|,
// first releasing its committed memory to the OS.
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              void* ptr,
                                              size_t length) {
  PA_DCHECK(0 < handle && handle <= kNumPools);
  Pool* const owning_pool = GetPool(handle);
  PA_DCHECK(owning_pool->IsInitialized());
  DecommitPages(ptr, length);
  owning_pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
}
// Associates this pool with the super-page-aligned reserved range
// [ptr, ptr + length). The range provides length / kSuperPageSize chunks
// (one bitset bit each); that count must not exceed kMaxBits.
void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if DCHECK_IS_ON()
  // |address_end_| exists only to power bounds DCHECKs elsewhere.
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif
  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxBits);
  base::AutoLock scoped_lock(lock_);
  // All chunks start out free; the first-fit scan restarts at bit 0.
  alloc_bitset_.reset();
  bit_hint_ = 0;
}
// True once Initialize() has recorded a base address; a zero
// |address_begin_| is the "unused slot" sentinel.
bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}
// Marks the slot unused so a later Initialize() may claim it.
void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}
// First-fit search for |requested_size| bytes' worth of contiguous free super
// pages. On success the run is marked allocated and its base address is
// returned; returns 0 when no large-enough free run exists.
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  base::AutoLock scoped_lock(lock_);
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size >> kSuperPageShift;
  // Use first-fit policy to find an available chunk from free chunks. Start
  // from |bit_hint_|, because we know there are no free chunks before.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_)
      return 0;
    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Set |found=false|
        // to ensure the outer loop continues. However, continue the inner loop
        // to set |beg_bit| just past the last set bit in the investigated
        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
        // next outer loop pass from checking the same bits.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit)
          ++bit_hint_;
      }
    }
    // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
    // mark as allocated) and return the allocated address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if DCHECK_IS_ON()
      PA_DCHECK(address + requested_size <= address_end_);
#endif
      return address;
    }
  }
  NOTREACHED();
  return 0;
}
// Attempts to claim the exact super-page-aligned range
// [address, address + requested_size). Returns false — without modifying the
// bitset — when the range extends past the pool or any part of it is already
// allocated; otherwise marks the whole range allocated and returns true.
bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  base::AutoLock scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t first_bit = (address - address_begin_) / kSuperPageSize;
  const size_t past_last_bit = first_bit + requested_size / kSuperPageSize;
  // Reject a request that would run off the end of the pool.
  if (past_last_bit > total_bits_)
    return false;
  // Reject if any super page in the range is already taken.
  for (size_t bit = first_bit; bit < past_last_bit; ++bit) {
    if (alloc_bitset_.test(bit))
      return false;
  }
  // The whole range is free: claim it.
  for (size_t bit = first_bit; bit < past_last_bit; ++bit)
    alloc_bitset_.set(bit);
  return true;
}
// Marks the super pages covering [address, address + free_size) as free
// again, and rewinds |bit_hint_| so the next FindChunk() scan starts no later
// than the released run.
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  base::AutoLock scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));
  PA_DCHECK(address_begin_ <= address);
#if DCHECK_IS_ON()
  PA_DCHECK(address + free_size <= address_end_);
#endif
  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    // Double-frees would trip this DCHECK: each bit must currently be set.
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}
// Members carry in-class initializers (see the header); Initialize() performs
// the real setup.
AddressPoolManager::Pool::Pool() = default;
AddressPoolManager::Pool::~Pool() = default;
#else // defined(PA_HAS_64_BITS_POINTERS)
static_assert(
kSuperPageSize %
AddressPoolManagerBitmap::kBytesPer1BitOfNormalBucketBitmap ==
0,
"kSuperPageSize must be a multiple of kBytesPer1BitOfNormalBucketBitmap.");
static_assert(
kSuperPageSize /
AddressPoolManagerBitmap::kBytesPer1BitOfNormalBucketBitmap >
0,
"kSuperPageSize must be larger than kBytesPer1BitOfNormalBucketBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfNormalBucketBitmap >=
AddressPoolManagerBitmap::kGuardOffsetOfNormalBucketBitmap,
"kGuardBitsOfNormalBucketBitmap must be larger than or equal to "
"kGuardOffsetOfNormalBucketBitmap.");
// Sets |bit_length| consecutive bits of |bitmap| starting at |start_bit|.
// Every bit in the range must currently be clear (DCHECKed), i.e. the region
// is being marked allocated for the first time.
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t past_end = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(past_end <= bitsize);
  for (size_t bit = start_bit; bit < past_end; ++bit) {
    PA_DCHECK(!bitmap.test(bit));
    bitmap.set(bit);
  }
}
// Clears |bit_length| consecutive bits of |bitmap| starting at |start_bit|.
// Every bit in the range must currently be set (DCHECKed), i.e. the region
// being released was previously marked allocated.
template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t past_end = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(past_end <= bitsize);
  for (size_t bit = start_bit; bit < past_end; ++bit) {
    PA_DCHECK(bitmap.test(bit));
    bitmap.reset(bit);
  }
}
// 32-bit path: reserves |length| bytes of inaccessible address space via
// AllocPages() and records the range in the bitmap selected by |handle|.
// Returns nullptr when the underlying reservation fails.
char* AddressPoolManager::Reserve(pool_handle handle,
                                  void* requested_address,
                                  size_t length) {
  PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
  void* raw = AllocPages(requested_address, length, kSuperPageSize,
                         PageInaccessible, PageTag::kPartitionAlloc);
  if (UNLIKELY(!raw))
    return nullptr;
  char* reservation = reinterpret_cast<char*>(raw);
  MarkUsed(handle, reservation, length);
  return reservation;
}
// 32-bit path: clears the tracking bits for [ptr, ptr + length) and then
// hands the pages back to the OS via FreePages().
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              void* ptr,
                                              size_t length) {
  const uintptr_t base = reinterpret_cast<uintptr_t>(ptr);
  PA_DCHECK(!(base & kSuperPageOffsetMask));
  PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
  MarkUnused(handle, base, length);
  FreePages(ptr, length);
}
// Records [address, address + length) as allocated in the bitmap matching
// |handle|: page-granularity bits for the direct map, and coarser
// (shift-based) bits — minus the guard blocks — for normal buckets.
void AddressPoolManager::MarkUsed(pool_handle handle,
                                  const char* address,
                                  size_t length) {
  uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(address);
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
  if (handle == kDirectMapHandle) {
    SetBitmap(AddressPoolManagerBitmap::directmap_bits_,
              ptr_as_uintptr / PageAllocationGranularity(),
              length / PageAllocationGranularity());
  } else {
    PA_DCHECK(handle == kNormalBucketHandle);
    PA_DCHECK(!(length & kSuperPageOffsetMask));
    // If BUILDFLAG(MAKE_GIGACAGE_GRANULARITY_PARTITION_PAGE_SIZE) is defined,
    // make IsManagedByNormalBucketPool return false when an address
    // inside the first or the last PartitionPageSize()-bytes
    // block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used by
    // PartitionAllocator and is not available for callers.
    //
    // This is required to avoid crash caused by the following code:
    //
    //   {
    //     CheckedPtr<T> ptr = allocateFromNotPartitionAllocator(X * sizeof(T));
    //     for (size_t i = 0; i < X; i ++) { ...; ptr++; }
    //     // |ptr| may point an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. So when
    // exiting the scope, IsManagedByNormalBucketPool(ptr) returns true without
    // the barrier blocks. Since the memory is not allocated by Partition
    // Allocator, ~CheckedPtr will cause crash.
    SetBitmap(
        AddressPoolManagerBitmap::normal_bucket_bits_,
        (ptr_as_uintptr >>
         AddressPoolManagerBitmap::kBitShiftOfNormalBucketBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfNormalBucketBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfNormalBucketBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfNormalBucketBitmap);
  }
}
// Clears the tracking bits for [address, address + length) in the bitmap
// matching |handle|; the exact inverse of MarkUsed().
void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
  // Currently, address regions allocated by kNormalBucketHandle are never freed
  // in PartitionAlloc, except on error paths. Thus we have LIKELY for
  // kDirectMapHandle.
  if (LIKELY(handle == kDirectMapHandle)) {
    ResetBitmap(AddressPoolManagerBitmap::directmap_bits_,
                address / PageAllocationGranularity(),
                length / PageAllocationGranularity());
  } else {
    PA_DCHECK(handle == kNormalBucketHandle);
    PA_DCHECK(!(length & kSuperPageOffsetMask));
    // If BUILDFLAG(MAKE_GIGACAGE_GRANULARITY_PARTITION_PAGE_SIZE) is defined,
    // make IsManagedByNormalBucketPool return false when an address
    // inside the first or the last PartitionPageSize()-bytes block is given.
    // (See MarkUsed comment)
    ResetBitmap(
        AddressPoolManagerBitmap::normal_bucket_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfNormalBucketBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfNormalBucketBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfNormalBucketBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfNormalBucketBitmap);
  }
}
// Test-only: clears both global tracking bitmaps under the shared lock.
void AddressPoolManager::ResetForTesting() {
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::directmap_bits_.reset();
  AddressPoolManagerBitmap::normal_bucket_bits_.reset();
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
// Construction happens lazily through the leaky LazyInstance in
// GetInstance(); the destructor is never run in practice.
AddressPoolManager::AddressPoolManager() = default;
AddressPoolManager::~AddressPoolManager() = default;
} // namespace internal
} // namespace base

View File

@ -0,0 +1,147 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#include <bitset>
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/atomicops.h"
#include "base/lazy_instance.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
namespace base {
namespace internal {
// (64bit version)
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 2 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAllocDirectMap and
// IsManagedByPartitionAllocNormalBuckets use the bitmaps to judge whether a
// given address is managed by the direct map or normal buckets.
class BASE_EXPORT AddressPoolManager {
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;

 public:
  // Upper bound on a single pool's span (64-bit) or on the whole tracked
  // address space (32-bit).
  static constexpr uint64_t kNormalBucketMaxSize =
#if defined(PA_HAS_64_BITS_POINTERS)
      16 * kGiB;
#else
      4 * kGiB;
#endif

  static AddressPoolManager* GetInstance();

#if defined(PA_HAS_64_BITS_POINTERS)
  // Registers / unregisters a reserved super-page-aligned range; Add()
  // returns a 1-based handle used by all other calls.
  pool_handle Add(uintptr_t address, size_t length);
  void Remove(pool_handle handle);
#endif
  // Reserves address space from GigaCage.
  char* Reserve(pool_handle handle, void* requested_address, size_t length);
  // Frees address space back to GigaCage and decommits underlying system pages.
  void UnreserveAndDecommit(pool_handle handle, void* ptr, size_t length);
  void ResetForTesting();

#if !defined(PA_HAS_64_BITS_POINTERS)
  static bool IsManagedByDirectMapPool(const void* address) {
    return AddressPoolManagerBitmap::IsManagedByDirectMapPool(address);
  }
  static bool IsManagedByNormalBucketPool(const void* address) {
    return AddressPoolManagerBitmap::IsManagedByNormalBucketPool(address);
  }
#endif

 private:
  AddressPoolManager();
  ~AddressPoolManager();

#if defined(PA_HAS_64_BITS_POINTERS)
  // Tracks one contiguous reserved range at super-page granularity.
  class Pool {
   public:
    Pool();
    ~Pool();

    void Initialize(uintptr_t ptr, size_t length);
    bool IsInitialized();
    void Reset();

    uintptr_t FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

    bool TryReserveChunk(uintptr_t address, size_t size);

   private:
    // The bitset stores the allocation state of the address pool. 1 bit per
    // super-page: 1 = allocated, 0 = free.
    static constexpr size_t kMaxBits = kNormalBucketMaxSize / kSuperPageSize;

    base::Lock lock_;
    std::bitset<kMaxBits> alloc_bitset_ GUARDED_BY(lock_);
    // An index of a bit in the bitset before which we know for sure they are
    // all 1s. This is a best-effort hint in the sense that there still may be
    // lots of 1s after this index, but at least we know there is no point in
    // starting the search before it.
    size_t bit_hint_ GUARDED_BY(lock_);

    size_t total_bits_ = 0;
    uintptr_t address_begin_ = 0;
#if DCHECK_IS_ON()
    uintptr_t address_end_ = 0;
#endif
  };

  // Maps a 1-based public handle onto the backing Pool slot.
  ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
    PA_DCHECK(0 < handle && handle <= kNumPools);
    return &pools_[handle - 1];
  }

  static constexpr size_t kNumPools = 2;
  Pool pools_[kNumPools];

#else  // defined(PA_HAS_64_BITS_POINTERS)

  void MarkUsed(pool_handle handle, const char* address, size_t size);
  void MarkUnused(pool_handle handle, uintptr_t address, size_t size);

  // On 32-bit the two pools are fixed and named by these constant handles.
  static constexpr pool_handle kDirectMapHandle = 1;
  static constexpr pool_handle kNormalBucketHandle = 2;
  friend internal::pool_handle GetDirectMapPool();
  friend internal::pool_handle GetNormalBucketPool();
#endif  // defined(PA_HAS_64_BITS_POINTERS)

  friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
  DISALLOW_COPY_AND_ASSIGN(AddressPoolManager);
};
#if !defined(PA_HAS_64_BITS_POINTERS)
// Returns the fixed handle of the pool tracking direct-map allocations
// (32-bit configuration only).
ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
  return AddressPoolManager::kDirectMapHandle;
}
// Returns the fixed handle of the pool tracking normal-bucket allocations.
ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
  return AddressPoolManager::kNormalBucketHandle;
}
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

View File

@ -0,0 +1,32 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/lazy_instance.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace base {
namespace internal {
namespace {
LazyInstance<Lock>::Leaky g_lock = LAZY_INSTANCE_INITIALIZER;
} // namespace
// Returns the process-wide lock guarding writes to both bitmaps below; the
// IsManagedBy* readers in the header deliberately read without it.
Lock& AddressPoolManagerBitmap::GetLock() {
  return g_lock.Get();
}
// Out-of-line storage for the bitmaps declared in the header.
std::bitset<AddressPoolManagerBitmap::kDirectMapBits>
    AddressPoolManagerBitmap::directmap_bits_;  // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kNormalBucketBits>
    AddressPoolManagerBitmap::normal_bucket_bits_;  // GUARDED_BY(GetLock())
} // namespace internal
} // namespace base
#endif // !defined(PA_HAS_64_BITS_POINTERS)

View File

@ -0,0 +1,87 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#include <bitset>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/partition_alloc_buildflags.h"
#include "base/synchronization/lock.h"
#if !defined(PA_HAS_64_BITS_POINTERS)
namespace base {
namespace internal {
// AddressPoolManagerBitmap is the bitmap that tracks whether a given address is
// managed by the direct map or normal buckets.
class BASE_EXPORT AddressPoolManagerBitmap {
 public:
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  // The full 32-bit address space is tracked.
  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;
#if BUILDFLAG(MAKE_GIGACAGE_GRANULARITY_PARTITION_PAGE_SIZE)
  // Finer (partition-page) granularity, with one leading guard bit and two
  // guard bits in total excluded per region (see MarkUsed() in the .cc file).
  static constexpr size_t kBitShiftOfNormalBucketBitmap = PartitionPageShift();
  static constexpr size_t kBytesPer1BitOfNormalBucketBitmap =
      PartitionPageSize();
  static constexpr size_t kGuardOffsetOfNormalBucketBitmap = 1;
  static constexpr size_t kGuardBitsOfNormalBucketBitmap = 2;
#else
  // Coarser (super-page) granularity, no guard bits.
  static constexpr size_t kBitShiftOfNormalBucketBitmap = kSuperPageShift;
  static constexpr size_t kBytesPer1BitOfNormalBucketBitmap = kSuperPageSize;
  static constexpr size_t kGuardOffsetOfNormalBucketBitmap = 0;
  static constexpr size_t kGuardBitsOfNormalBucketBitmap = 0;
#endif
  static constexpr size_t kNormalBucketBits =
      kAddressSpaceSize / kBytesPer1BitOfNormalBucketBitmap;
  static constexpr size_t kDirectMapBits =
      kAddressSpaceSize / PageAllocationGranularity();

  static bool IsManagedByDirectMapPool(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);
    // It is safe to read |directmap_bits_| without a lock since the caller is
    // responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return TS_UNCHECKED_READ(directmap_bits_)
        .test(address_as_uintptr / PageAllocationGranularity());
  }

  static bool IsManagedByNormalBucketPool(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);
    // It is safe to read |normal_bucket_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return TS_UNCHECKED_READ(normal_bucket_bits_)
        .test(address_as_uintptr >> kBitShiftOfNormalBucketBitmap);
  }

 private:
  friend class AddressPoolManager;

  static Lock& GetLock();

  static std::bitset<kDirectMapBits> directmap_bits_ GUARDED_BY(GetLock());
  static std::bitset<kNormalBucketBits> normal_bucket_bits_
      GUARDED_BY(GetLock());
};
} // namespace internal
// True when |address| lies inside a region reserved via the direct-map pool.
ALWAYS_INLINE bool IsManagedByPartitionAllocDirectMap(const void* address) {
  return internal::AddressPoolManagerBitmap::IsManagedByDirectMapPool(address);
}
// True when |address| lies inside a region reserved via the normal-bucket
// pool (minus any guard blocks; see AddressPoolManager::MarkUsed).
ALWAYS_INLINE bool IsManagedByPartitionAllocNormalBuckets(const void* address) {
  return internal::AddressPoolManagerBitmap::IsManagedByNormalBucketPool(
      address);
}
} // namespace base
#endif // !defined(PA_HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

View File

@ -0,0 +1,17 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
namespace base {
namespace internal {
// 1-based identifier naming one of the address pools owned by
// AddressPoolManager; 0 is never a valid handle.
using pool_handle = unsigned;
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

View File

@ -0,0 +1,68 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/check_op.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <windows.h> // Must be in front of other Windows header files.
#include <VersionHelpers.h>
#endif
namespace base {
// Produces a random, granularity-aligned base-address hint for a new mapping,
// constrained by the per-platform ASLRMask()/ASLROffset() constants. May
// return nullptr (32-bit Windows outside WOW64) to mean "use no hint".
void* GetRandomPageBase() {
  uintptr_t random = static_cast<uintptr_t>(RandomValue());
#if defined(ARCH_CPU_64_BITS)
  // Combine two 32-bit draws into a 64-bit value.
  random <<= 32ULL;
  random |= static_cast<uintptr_t>(RandomValue());
  // The ASLRMask() and ASLROffset() constants will be suitable for the
  // OS and build configuration.
#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Windows >= 8.1 has the full 47 bits. Use them where available.
  // NOTE(review): this lazy init is not synchronized; presumably benign since
  // every racer computes the same value — confirm.
  static bool windows_81 = false;
  static bool windows_81_initialized = false;
  if (!windows_81_initialized) {
    windows_81 = IsWindows8Point1OrGreater();
    windows_81_initialized = true;
  }
  if (!windows_81) {
    random &= internal::ASLRMaskBefore8_10();
  } else {
    random &= internal::ASLRMask();
  }
  random += internal::ASLROffset();
#else
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else   // defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
  // On win32 host systems the randomization plus huge alignment causes
  // excessive fragmentation. Plus most of these systems lack ASLR, so the
  // randomization isn't buying anything. In that case we just skip it.
  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
  static BOOL is_wow64 = -1;
  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
    is_wow64 = FALSE;
  if (!is_wow64)
    return nullptr;
#endif  // defined(OS_WIN)
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // defined(ARCH_CPU_32_BITS)
  // The mask/offset math must leave the hint aligned.
  PA_DCHECK(!(random & PageAllocationGranularityOffsetMask()));
  return reinterpret_cast<void*>(random);
}
} // namespace base

View File

@ -0,0 +1,266 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
BASE_EXPORT void* GetRandomPageBase();
namespace internal {
// Clamps |mask| to page-allocation granularity so derived hints stay aligned.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) {
  return mask & PageAllocationGranularityBaseMask();
}
// Builds a |bits|-wide all-ones mask, aligned per AslrAddress().
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) {
  return AslrAddress((1ULL << bits) - 1ULL);
}
// Turn off formatting, because the thicket of nested ifdefs below is
// incomprehensible without indentation. It is also incomprehensible with
// indentation, but the only other option is a combinatorial explosion of
// *_{win,linux,mac,foo}_{32,64}.h files.
//
// clang-format off
#if defined(ARCH_CPU_64_BITS)
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// We shouldn't allocate system pages at all for sanitizer builds. However,
// we do, and if random hint addresses interfere with address ranges
// hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See
// https://crbug.com/539863.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrAddress(0x007fffffffffULL);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x7e8000000000ULL);
}
#elif defined(OS_WIN)
  // Windows 8.1 and newer support the full 48 bit address range. Older
  // versions of Windows only support 44 bits. Since ASLROffset() is non-zero
  // and may cause a carry, use 47 and 43 bit masks. See
  // http://www.alex-ionescu.com/?p=246
  constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
    return AslrMask(47);
  }
  // Mask applied on Windows versions before 8.1 (44-bit address space).
  constexpr ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
    return AslrMask(43);
  }
  // Try not to map pages into the range where Windows loads DLLs by default.
  constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
    return 0x80000000ULL;
  }
#elif defined(OS_APPLE)
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
// is destroyed. Using a virtual address space that is too large causes a
// leak of about 1 wired [can never be paged out] page per call to mmap. The
// page is only reclaimed when the process is killed. Confine the hint to a
// 39-bit section of the virtual address space.
//
// This implementation adapted from
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
// is that here we clamp to 39 bits, not 32.
//
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
// POSIX-likes and Fuchsia: ranges are chosen per CPU architecture below.
#if defined(ARCH_CPU_X86_64)
// Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(46);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}
#elif defined(ARCH_CPU_ARM64)
#if defined(OS_ANDROID)
// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x20000000ULL);
}
#else
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(38);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x1000000000ULL);
}
#endif
#elif defined(ARCH_CPU_PPC64)
#if defined(OS_AIX)
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x400000000000ULL);
}
#elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(42);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}
#else  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(46);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}
#endif  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
#elif defined(ARCH_CPU_S390X)
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(40);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}
#elif defined(ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(29);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}
#else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
       // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
#if defined(OS_SOLARIS)
// For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third
// quarter). Because we do not MAP_FIXED, this will be treated only as a
// hint -- the system will not fail to mmap because something else
// happens to already be mapped at our random address. We deliberately
// set the hint high enough to get well above the system's break (that
// is, the heap); Solaris and illumos will try the hint and if that
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x80000000ULL);
}
#elif defined(OS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x90000000ULL);
}
#else  // !defined(OS_SOLARIS) && !defined(OS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x20000000ULL);
}
#endif  // !defined(OS_SOLARIS) && !defined(OS_AIX)
#endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
        // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
#endif  // defined(OS_POSIX)
#elif defined(ARCH_CPU_32_BITS)
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
// (A 30-bit mask yields at most ~1 GiB, which added to the 0.5 GiB offset
// stays below 1.5 GiB.)
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x20000000ULL);
}
#else
#error Please tell us about your exotic hardware! Sounds interesting.
#endif  // defined(ARCH_CPU_32_BITS)
// clang-format on
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_

View File

@ -0,0 +1,47 @@
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy the code between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
# Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.
.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end

# int64_t arm_bti_test_function(int64_t x)
# Entered through a valid BTI landing pad; falls through the whole body,
# so it returns x + 3 (add #1 below, then add #2 at the label past it).
arm_bti_test_function:
  # Mark the start of this function as a valid call target.
  bti jc
  add x0, x0, #1
# int64_t arm_bti_test_function_invalid_offset(int64_t x)
# Entry point with no landing pad: returns x + 2 where BTI is not enforced,
# and is expected to fault when the page is mapped with PROT_BTI on a
# BTI-capable core.
arm_bti_test_function_invalid_offset:
  # This label simulates calling an incomplete function.
  # Jumping here should crash systems which support BTI.
  add x0, x0, #2
  ret
# End marker only (never called); lets the test compute how many bytes of
# code to copy.
arm_bti_test_function_end:
  nop

# GNU property note marking this object file as BTI-compatible, so the
# linker can propagate the property to the final binary.
.pushsection .note.gnu.property, "a";
  .balign 8;
  .long 4;          /* name size */
  .long 0x10;       /* descriptor size */
  .long 0x5;        /* type: NT_GNU_PROPERTY_TYPE_0 */
  .asciz "GNU";
  .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
  .long 4;
  .long 1; /* GNU_PROPERTY_AARCH64_BTI */;
  .long 0;
.popsection

View File

@ -0,0 +1,17 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS

#include <stdint.h>

#include "build/build_config.h"

#if defined(ARCH_CPU_ARM64)
// Declarations for the Branch Target Identification (BTI) test functions
// defined in assembly (arm_bti_test_functions.S); see that file for how each
// entry point behaves.
extern "C" {
// Entered through a valid BTI landing pad; safe to call on any arm64 core.
int64_t arm_bti_test_function(int64_t);
// Entry point with no landing pad; expected to crash when the containing
// page is mapped with PROT_BTI on a BTI-capable core.
int64_t arm_bti_test_function_invalid_offset(int64_t);
// End-of-code marker only; used to compute how many bytes to copy. Never
// actually called.
void arm_bti_test_function_end(void);
}
#endif  // defined(ARCH_CPU_ARM64)
#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS

View File

@ -0,0 +1,44 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace base {
namespace internal {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Turns off the thread cache for |root|, purging all cached memory first.
// No-op when |root| is null, has no thread cache on this platform, or had
// its thread cache disabled already.
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
  if (root && root->with_thread_cache) {
    internal::ThreadCacheRegistry::Instance().PurgeAll();
    root->with_thread_cache = false;
    // The thread cache objects themselves are not destroyed here. Background
    // threads reclaim theirs (freeing cached memory) at thread destruction;
    // the main thread's cache is leaked.
  }
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace internal
// Disables the PartitionAlloc thread cache on every process-wide root:
// the regular, original and aligned malloc() partitions.
void DisablePartitionAllocThreadCacheForProcess() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  for (auto* root : {base::internal::PartitionAllocMalloc::Allocator(),
                     base::internal::PartitionAllocMalloc::OriginalAllocator(),
                     base::internal::PartitionAllocMalloc::AlignedAllocator()}) {
    internal::DisableThreadCacheForRootIfEnabled(root);
  }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
} // namespace base

View File

@ -0,0 +1,31 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/base_export.h"
namespace base {
// Disables the thread cache for the entire process.
//
// Saves memory but slows down the allocator *significantly*. Only use for
// configurations that are very memory-constrained or performance-insensitive
// processes.
//
// Must preferably be called from the main thread, when no/few threads have
// been started.
//
// Otherwise, there are several things that can happen:
// 1. Another thread is currently temporarily disabling the thread cache, and
// will re-enable it, negating this call's effect.
// 2. Other threads' caches cannot be purged from here, and would retain their
// cached memory until thread destruction (where it is reclaimed).
//
// These are not correctness issues, at worst in the first case, memory is not
// saved, and in the second one, *some* of the memory is leaked.
BASE_EXPORT void DisablePartitionAllocThreadCacheForProcess();
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

View File

@ -0,0 +1,158 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/pcscan.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/metrics/histogram_functions.h"
#include "base/trace_event/base_tracing.h"
namespace base {
namespace {
// Adds |partition| to |partitions|, DCHECKing that it was not tracked yet.
template <bool thread_safe>
void Insert(std::set<PartitionRoot<thread_safe>*>* partitions,
            PartitionRoot<thread_safe>* partition) {
  PA_DCHECK(partition);
  const bool newly_inserted = partitions->insert(partition).second;
  PA_DCHECK(newly_inserted);
}
// Removes |partition| from |partitions|, DCHECKing that it was tracked.
// Erases by key so that, with DCHECKs compiled out, an absent partition is
// still a safe no-op.
template <bool thread_safe>
void Remove(std::set<PartitionRoot<thread_safe>*>* partitions,
            PartitionRoot<thread_safe>* partition) {
  PA_DCHECK(partition);
  const size_t num_erased = partitions->erase(partition);
  PA_DCHECK(num_erased == 1u);
}
} // namespace
// static
// Leaky singleton: the instance is never destroyed (NoDestructor), matching
// the class comment that the reclaimer runs as long as the process is alive.
PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
  static NoDestructor<PartitionAllocMemoryReclaimer> instance;
  return instance.get();
}
// Starts tracking a thread-safe partition. Thread-safe: guarded by |lock_|;
// Insert() DCHECKs that the partition was not registered twice.
void PartitionAllocMemoryReclaimer::RegisterPartition(
    PartitionRoot<internal::ThreadSafe>* partition) {
  AutoLock lock(lock_);
  Insert(&thread_safe_partitions_, partition);
}

// Same as above, for a not-thread-safe partition.
void PartitionAllocMemoryReclaimer::RegisterPartition(
    PartitionRoot<internal::NotThreadSafe>* partition) {
  AutoLock lock(lock_);
  Insert(&thread_unsafe_partitions_, partition);
}
// Stops tracking a thread-safe partition. Thread-safe: guarded by |lock_|;
// Remove() DCHECKs that the partition was actually registered.
void PartitionAllocMemoryReclaimer::UnregisterPartition(
    PartitionRoot<internal::ThreadSafe>* partition) {
  AutoLock lock(lock_);
  Remove(&thread_safe_partitions_, partition);
}

// Same as above, for a not-thread-safe partition.
void PartitionAllocMemoryReclaimer::UnregisterPartition(
    PartitionRoot<internal::NotThreadSafe>* partition) {
  AutoLock lock(lock_);
  Remove(&thread_unsafe_partitions_, partition);
}
// Starts the repeating reclaim task on |task_runner|. Must be called at most
// once (DCHECKed via |timer_|), and after at least one thread-safe partition
// has been registered.
void PartitionAllocMemoryReclaimer::Start(
    scoped_refptr<SequencedTaskRunner> task_runner) {
  PA_DCHECK(!timer_);
  PA_DCHECK(task_runner);

  {
    AutoLock lock(lock_);
    PA_DCHECK(!thread_safe_partitions_.empty());
  }

  // This does not need to run on the main thread, however there are a few
  // reasons to do it there:
  // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
  //   is more likely in cache when executing on the main thread.
  // - Memory reclaim takes the partition lock for each partition. As a
  //   consequence, while reclaim is running, the main thread is unlikely to be
  //   able to make progress, as it would be waiting on the lock.
  // - Finally, this runs in idle time only, so there should be no visible
  //   impact.
  //
  // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
  // seconds is useful. Since this is meant to run during idle time only, it is
  // a reasonable starting point balancing effectiveness vs cost. See
  // crbug.com/942512 for details and experimental results.
  constexpr TimeDelta kInterval = TimeDelta::FromSeconds(4);

  timer_ = std::make_unique<RepeatingTimer>();
  timer_->SetTaskRunner(task_runner);
  // Here and below, |Unretained(this)| is fine as |this| lives forever, as a
  // singleton.
  timer_->Start(
      FROM_HERE, kInterval,
      BindRepeating(&PartitionAllocMemoryReclaimer::ReclaimPeriodically,
                    Unretained(this)));
}
// Private: instances are only created through Instance().
PartitionAllocMemoryReclaimer::PartitionAllocMemoryReclaimer() = default;
// Never runs for the singleton, which is held in a NoDestructor.
PartitionAllocMemoryReclaimer::~PartitionAllocMemoryReclaimer() = default;
// Most aggressive variant: same flags as the periodic reclaim, plus
// PartitionPurgeForceAllFreed.
void PartitionAllocMemoryReclaimer::ReclaimAll() {
  Reclaim(PartitionPurgeDecommitEmptySlotSpans |
          PartitionPurgeDiscardUnusedSystemPages |
          PartitionPurgeForceAllFreed);
}
// Cheaper flag set, used by the repeating timer task started in Start().
void PartitionAllocMemoryReclaimer::ReclaimPeriodically() {
  Reclaim(PartitionPurgeDecommitEmptySlotSpans |
          PartitionPurgeDiscardUnusedSystemPages);
}
// Purges all registered partitions. |flags| is an OR of
// base::PartitionPurgeFlags; PartitionPurgeForceAllFreed additionally forces
// a blocking PCScan pass.
void PartitionAllocMemoryReclaimer::Reclaim(int flags) {
  AutoLock lock(lock_);  // Has to protect from concurrent (Un)Register calls.
  TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");

  // PCScan quarantines freed slots. Trigger the scan first to let it call
  // FreeNoHooksImmediate on slots that pass the quarantine.
  //
  // In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
  // so that the slots are actually freed. (This is done synchronously only for
  // the current thread.)
  //
  // Lastly, decommit empty slot spans and try to discard unused pages at the
  // end of the remaining active slots.
  {
    using PCScan = internal::PCScan<internal::ThreadSafe>;
    const auto invocation_mode = flags & PartitionPurgeForceAllFreed
                                     ? PCScan::InvocationMode::kForcedBlocking
                                     : PCScan::InvocationMode::kBlocking;
    PCScan::Instance().PerformScanIfNeeded(invocation_mode);
  }

#if defined(PA_THREAD_CACHE_SUPPORTED)
  internal::ThreadCacheRegistry::Instance().PurgeAll();
#endif

  for (auto* partition : thread_safe_partitions_)
    partition->PurgeMemory(flags);
  for (auto* partition : thread_unsafe_partitions_)
    partition->PurgeMemory(flags);
}
// Test-only: stops the timer and forgets every registered partition.
void PartitionAllocMemoryReclaimer::ResetForTesting() {
  AutoLock lock(lock_);
  timer_.reset();
  thread_unsafe_partitions_.clear();
  thread_safe_partitions_.clear();
}
} // namespace base

View File

@ -0,0 +1,75 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#include <memory>
#include <set>
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "base/timer/timer.h"
namespace base {
// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// called from any thread, concurrently with reclaim. Reclaim itself runs in the
// context of the provided |SequencedTaskRunner|, meaning that the caller must
// take care of this runner being compatible with the various partitions.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.
class BASE_EXPORT PartitionAllocMemoryReclaimer {
 public:
  static PartitionAllocMemoryReclaimer* Instance();

  // Internal. Do not use.
  // Registers a partition to be tracked by the reclaimer.
  void RegisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
  void RegisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
  // Internal. Do not use.
  // Unregisters a partition to be tracked by the reclaimer.
  void UnregisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
  void UnregisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
  // Starts the periodic reclaim. Should be called once.
  void Start(scoped_refptr<SequencedTaskRunner> task_runner);
  // Triggers an explicit reclaim now, reclaiming as much free memory as
  // possible (also passes PartitionPurgeForceAllFreed, unlike the periodic
  // reclaim).
  void ReclaimAll();
  // Triggers an explicit reclaim now, with the same (less aggressive) flags
  // as the periodic reclaim task.
  void ReclaimPeriodically();

 private:
  PartitionAllocMemoryReclaimer();
  ~PartitionAllocMemoryReclaimer();
  // |flags| is an OR of base::PartitionPurgeFlags
  void Reclaim(int flags);
  void ResetForTesting();

  // Schedules the periodic ReclaimPeriodically() task.
  std::unique_ptr<RepeatingTimer> timer_;

  Lock lock_;
  std::set<PartitionRoot<internal::ThreadSafe>*> thread_safe_partitions_
      GUARDED_BY(lock_);
  std::set<PartitionRoot<internal::NotThreadSafe>*> thread_unsafe_partitions_
      GUARDED_BY(lock_);

  friend class NoDestructor<PartitionAllocMemoryReclaimer>;
  friend class PartitionAllocMemoryReclaimerTest;
  DISALLOW_COPY_AND_ASSIGN(PartitionAllocMemoryReclaimer);
};
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

View File

@ -0,0 +1,233 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OBJECT_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OBJECT_BITMAP_H_
#include <climits>
#include <cstddef>
#include <cstdint>
#include <algorithm>
#include <array>
#include <atomic>
#include <tuple>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
namespace base {
namespace internal {
// Bitmap which tracks beginning of allocated objects. The bitmap can be safely
// accessed from multiple threads, but this doesn't imply visibility on the data
// (i.e. no ordering guaranties, since relaxed atomics are used underneath). The
// bitmap itself must be created inside a page, size and alignment of which are
// specified as template arguments |PageSize| and |PageAlignment|.
// |ObjectAlignment| specifies the minimal alignment of objects that are
// allocated inside a page (serves as the granularity in the bitmap).
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
class ObjectBitmap final {
  // Bits are packed into machine-word-sized cells.
  using CellType = uintptr_t;
  static constexpr size_t kBitsPerCell = sizeof(CellType) * CHAR_BIT;
  // One bit per ObjectAlignment-sized slot in the page, rounded up to whole
  // cells.
  static constexpr size_t kBitmapSize =
      (PageSize + ((kBitsPerCell * ObjectAlignment) - 1)) /
      (kBitsPerCell * ObjectAlignment);
  // Masks splitting an address into page base and offset within the page.
  static constexpr size_t kPageOffsetMask = PageAlignment - 1;
  static constexpr size_t kPageBaseMask = ~kPageOffsetMask;

 public:
  // Selects between atomic (default, relaxed ordering) and plain accesses.
  enum class AccessType : uint8_t {
    kAtomic,
    kNonAtomic,
  };

  static constexpr size_t kPageSize = PageSize;
  static constexpr size_t kPageAlignment = PageAlignment;
  static constexpr size_t kObjectAlignment = ObjectAlignment;
  static constexpr size_t kMaxEntries = kBitmapSize * kBitsPerCell;
  static constexpr uintptr_t kSentinel = 0u;

  inline ObjectBitmap();

  // Marks, unmarks, or queries the object beginning at |address|.
  template <AccessType = AccessType::kAtomic>
  ALWAYS_INLINE void SetBit(uintptr_t address);
  template <AccessType = AccessType::kAtomic>
  ALWAYS_INLINE void ClearBit(uintptr_t address);
  template <AccessType = AccessType::kAtomic>
  ALWAYS_INLINE bool CheckBit(uintptr_t address) const;

  // Iterates all objects recorded in the bitmap.
  //
  // The callback is of type
  //   void(Address)
  // and is passed the object address as parameter.
  template <AccessType = AccessType::kAtomic, typename Callback>
  inline void Iterate(Callback) const;
  // Same as above, but also clears the bitmap while iterating.
  template <AccessType = AccessType::kAtomic, typename Callback>
  inline void IterateAndClear(Callback);
  // Non-atomically zeroes the whole bitmap.
  inline void Clear();

 private:
  // Views a plain cell as an atomic; relies on std::atomic<CellType> being
  // layout-compatible with CellType.
  std::atomic<CellType>& AsAtomicCell(size_t cell_index) {
    return reinterpret_cast<std::atomic<CellType>&>(bitmap_[cell_index]);
  }
  const std::atomic<CellType>& AsAtomicCell(size_t cell_index) const {
    return reinterpret_cast<const std::atomic<CellType>&>(bitmap_[cell_index]);
  }

  template <AccessType = AccessType::kAtomic, typename Callback, bool Clear>
  inline void IterateImpl(Callback);
  template <AccessType>
  ALWAYS_INLINE CellType LoadCell(size_t cell_index) const;
  // Maps an address to its (cell index, bit-within-cell) coordinates.
  ALWAYS_INLINE static constexpr std::pair<size_t, size_t> ObjectIndexAndBit(
      uintptr_t);

  std::array<CellType, kBitmapSize> bitmap_;
};
// Out-of-line definition so that kSentinel can be ODR-used pre-C++17.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
constexpr size_t
    ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::kSentinel;

// The constructor can be omitted, but the Chromium's clang plugin wrongly
// warns that the type is not trivially constructible.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
inline ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::ObjectBitmap() =
    default;
// Records the object starting at |address| in the bitmap, with a relaxed
// atomic RMW in the atomic variant and a plain OR otherwise.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type>
ALWAYS_INLINE void
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::SetBit(
    uintptr_t address) {
  size_t cell_index, object_bit;
  std::tie(cell_index, object_bit) = ObjectIndexAndBit(address);
  if (access_type == AccessType::kNonAtomic) {
    bitmap_[cell_index] |= (static_cast<CellType>(1) << object_bit);
    return;
  }
  auto& cell = AsAtomicCell(cell_index);
  cell.fetch_or(static_cast<CellType>(1) << object_bit,
                std::memory_order_relaxed);
}
// Removes the object starting at |address| from the bitmap, with a relaxed
// atomic RMW in the atomic variant and a plain AND otherwise.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type>
ALWAYS_INLINE void
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::ClearBit(
    uintptr_t address) {
  size_t cell_index, object_bit;
  std::tie(cell_index, object_bit) = ObjectIndexAndBit(address);
  const CellType keep_mask = ~(static_cast<CellType>(1) << object_bit);
  if (access_type == AccessType::kAtomic) {
    AsAtomicCell(cell_index).fetch_and(keep_mask, std::memory_order_relaxed);
  } else {
    bitmap_[cell_index] &= keep_mask;
  }
}
// Returns whether the object starting at |address| is currently recorded.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type>
ALWAYS_INLINE bool
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::CheckBit(
    uintptr_t address) const {
  size_t cell_index, object_bit;
  std::tie(cell_index, object_bit) = ObjectIndexAndBit(address);
  return LoadCell<access_type>(cell_index) &
         (static_cast<CellType>(1) << object_bit);
}
// Reads one cell, via a relaxed atomic load in the atomic variant and a
// plain load otherwise.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type>
ALWAYS_INLINE
typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::CellType
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::LoadCell(
    size_t cell_index) const {
  if (access_type == AccessType::kNonAtomic)
    return bitmap_[cell_index];
  return AsAtomicCell(cell_index).load(std::memory_order_relaxed);
}
// Translates |address| into its (cell index, bit within cell) coordinates,
// using only its offset within the enclosing page.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
ALWAYS_INLINE constexpr std::pair<size_t, size_t>
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::ObjectIndexAndBit(
    uintptr_t address) {
  const uintptr_t offset_in_page = address & kPageOffsetMask;
  const size_t object_number = offset_in_page / kObjectAlignment;
  const size_t cell_index = object_number / kBitsPerCell;
  PA_DCHECK(kBitmapSize > cell_index);
  const size_t bit = object_number % kBitsPerCell;
  return {cell_index, bit};
}
// Shared implementation of Iterate() and IterateAndClear(): walks every set
// bit cell by cell, invoking |callback| with the corresponding object
// address, and (when |should_clear|) zeroes each cell after visiting it.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type,
          typename Callback,
          bool should_clear>
inline void ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::IterateImpl(
    Callback callback) {
  // The bitmap (|this|) is allocated inside the page with |kPageAlignment|.
  const uintptr_t base = reinterpret_cast<uintptr_t>(this) & kPageBaseMask;
  for (size_t cell_index = 0; cell_index < kBitmapSize; ++cell_index) {
    CellType value = LoadCell<access_type>(cell_index);
    while (value) {
      // Lowest set bit identifies the next recorded object in this cell.
      const int trailing_zeroes = base::bits::CountTrailingZeroBits(value);
      const size_t object_number =
          (cell_index * kBitsPerCell) + trailing_zeroes;
      const uintptr_t object_address =
          base + (kObjectAlignment * object_number);
      callback(object_address);
      // Clear current object bit in temporary value to advance iteration.
      value &= ~(static_cast<CellType>(1) << trailing_zeroes);
    }
    if (should_clear) {
      // Reset the whole cell once its set bits have been visited.
      if (access_type == AccessType::kNonAtomic) {
        bitmap_[cell_index] = 0;
      } else {
        AsAtomicCell(cell_index).store(0, std::memory_order_relaxed);
      }
    }
  }
}
// Read-only iteration. The const_cast is safe: IterateImpl with
// should_clear == false does not modify the bitmap.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type,
          typename Callback>
inline void ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::Iterate(
    Callback callback) const {
  const_cast<ObjectBitmap&>(*this)
      .template IterateImpl<access_type, Callback, false>(std::move(callback));
}
// Iterates every recorded object and zeroes each cell as it is passed.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
template <typename ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::
              AccessType access_type,
          typename Callback>
inline void
ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::IterateAndClear(
    Callback callback) {
  IterateImpl<access_type, Callback, true>(std::move(callback));
}
// Non-atomically zeroes every cell of the bitmap.
template <size_t PageSize, size_t PageAlignment, size_t ObjectAlignment>
void ObjectBitmap<PageSize, PageAlignment, ObjectAlignment>::Clear() {
  bitmap_.fill(CellType{0});
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OBJECT_BITMAP_H_

View File

@ -0,0 +1,37 @@
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/compiler_specific.h"
#include "base/process/memory.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <windows.h>
#endif
namespace {
// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
//
// NOTE(review): anonymous namespace in a header gives each including
// translation unit its own copy of OnNoMemory — presumably intentional here
// so the caller's frame classification works per TU; confirm before
// refactoring.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED OnNoMemory(size_t size) {
  base::internal::RunPartitionAllocOomCallback();
  base::TerminateBecauseOutOfMemory(size);
  // Backstop in case TerminateBecauseOutOfMemory ever returns.
  IMMEDIATE_CRASH();
}
}  // namespace
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
// OOM_CRASH(size) is called by users of PageAllocator (including
// PartitionAlloc) to signify an allocation failure from the platform.
// The do/while(0) wrapper makes the macro usable as a single statement.
#define OOM_CRASH(size) \
  do {                  \
    OnNoMemory(size);   \
  } while (0)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_

View File

@ -0,0 +1,28 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check.h"
namespace base {
namespace {
// Process-wide callback run on OOM; null until SetPartitionAllocOomCallback()
// is called.
PartitionAllocOomCallback g_oom_callback;
}  // namespace
// Registers |callback| to run on OOM. One-shot: re-registration would
// silently drop the previous callback, hence the DCHECK.
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
  PA_DCHECK(!g_oom_callback);
  g_oom_callback = callback;
}
namespace internal {
// Invoked from OOM_CRASH() before terminating; no-op when no callback has
// been registered.
void RunPartitionAllocOomCallback() {
  if (g_oom_callback)
    g_oom_callback();
}
}  // namespace internal
} // namespace base

View File

@ -0,0 +1,24 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_

#include "base/base_export.h"

namespace base {

// Callback invoked when PartitionAlloc/PageAllocator hits an out-of-memory
// condition, just before the process crashes.
using PartitionAllocOomCallback = void (*)();

// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
// allocation failure from the platform. May be called at most once.
BASE_EXPORT void SetPartitionAllocOomCallback(
    PartitionAllocOomCallback callback);

namespace internal {
// Runs the registered callback, if any. Called from OOM_CRASH().
BASE_EXPORT void RunPartitionAllocOomCallback();
}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_

View File

@ -0,0 +1,292 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include <limits.h>
#include <atomic>
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/lazy_instance.h"
#include "base/no_destructor.h"
#include "base/numerics/checked_math.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <windows.h>
#endif
#if defined(OS_WIN)
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
#elif defined(OS_POSIX)
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif defined(OS_FUCHSIA)
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif
namespace base {
namespace {

LazyInstance<Lock>::Leaky g_reserve_lock = LAZY_INSTANCE_INITIALIZER;

// We may reserve/release address space on different threads.
Lock& GetReserveLock() {
  return g_reserve_lock.Get();
}

// Total bytes currently mapped through SystemAllocPages(); updated with
// relaxed atomics — presumably accounting-only, confirm at use sites.
std::atomic<size_t> g_total_mapped_address_space;

// We only support a single block of reserved address space.
void* s_reservation_address GUARDED_BY(GetReserveLock()) = nullptr;
size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;
// Allocates |length| bytes, and on failure releases the reserved address
// space block (when the hint permits a retry) and tries exactly once more.
void* AllocPagesIncludingReserved(void* address,
                                  size_t length,
                                  PageAccessibilityConfiguration accessibility,
                                  PageTag page_tag) {
  void* ret = SystemAllocPages(address, length, accessibility, page_tag);
  if (ret != nullptr)
    return ret;

  // The system cannot allocate |length| bytes. When the hint is advisory (or
  // absent), release any reserved address space and try once more.
  const bool can_retry = kHintIsAdvisory || address == nullptr;
  if (can_retry) {
    ReleaseReservation();
    ret = SystemAllocPages(address, length, accessibility, page_tag);
  }
  return ret;
}
// Trims |base| to given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns nullptr and frees |base|.
void* TrimMapping(void* base,
                  size_t base_length,
                  size_t trim_length,
                  uintptr_t alignment,
                  PageAccessibilityConfiguration accessibility) {
  size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (alignment - 1);
  if (pre_slack) {
    // Bytes to skip so that the trimmed region starts aligned.
    pre_slack = alignment - pre_slack;
  }
  size_t post_slack = base_length - pre_slack - trim_length;
  PA_DCHECK(base_length >= trim_length || pre_slack || post_slack);
  PA_DCHECK(pre_slack < base_length);
  PA_DCHECK(post_slack < base_length);
  return TrimMappingInternal(base, base_length, trim_length, accessibility,
                             pre_slack, post_slack);
}
} // namespace
// Allocates |length| bytes from the OS, passing |hint| through to the
// platform implementation, and accounts the new mapping in the global
// mapped-size counter.
void* SystemAllocPages(void* hint,
                       size_t length,
                       PageAccessibilityConfiguration accessibility,
                       PageTag page_tag) {
  PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
              PageAllocationGranularityOffsetMask()));
  void* mapping =
      SystemAllocPagesInternal(hint, length, accessibility, page_tag);
  if (mapping != nullptr) {
    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
  }
  return mapping;
}
// Allocates |length| bytes aligned to |align|: first a few exact-size
// attempts at aligned (random or caller-hinted) addresses, then a larger
// allocation trimmed down to an aligned window. Returns null only when the
// address space is exhausted. See page_allocator.h for the full contract.
void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration accessibility,
                 PageTag page_tag) {
  PA_DCHECK(length >= PageAllocationGranularity());
  PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
  PA_DCHECK(align >= PageAllocationGranularity());
  // Alignment must be power of 2 for masking math to work.
  PA_DCHECK(base::bits::IsPowerOfTwo(align));
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
              PageAllocationGranularityOffsetMask()));
  uintptr_t align_offset_mask = align - 1;
  uintptr_t align_base_mask = ~align_offset_mask;
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
  // If the client passed null as the address, choose a good one.
  if (address == nullptr) {
    address = GetRandomPageBase();
    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
                                      align_base_mask);
  }
  // First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
  // On 32 bit systems, first try one random aligned address, and then try an
  // aligned address derived from the value of |ret|.
  constexpr int kExactSizeTries = 2;
#else
  // On 64 bit systems, try 3 random aligned addresses.
  constexpr int kExactSizeTries = 3;
#endif
  for (int i = 0; i < kExactSizeTries; ++i) {
    void* ret =
        AllocPagesIncludingReserved(address, length, accessibility, page_tag);
    if (ret != nullptr) {
      // If the alignment is to our liking, we're done.
      if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
        return ret;
      // Free the memory and try again.
      FreePages(ret, length);
    } else {
      // |ret| is null; if this try was unhinted, we're OOM.
      if (kHintIsAdvisory || address == nullptr)
        return nullptr;
    }
#if defined(ARCH_CPU_32_BITS)
    // For small address spaces, try the first aligned address >= |ret|. Note
    // |ret| may be null, in which case |address| becomes null.
    address = reinterpret_cast<void*>(
        (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
        align_base_mask);
#else  // defined(ARCH_CPU_64_BITS)
    // Keep trying random addresses on systems that have a large address space.
    address = GetRandomPageBase();
    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
                                      align_base_mask);
#endif
  }
  // Make a larger allocation so we can force alignment.
  // |try_length| cannot overflow per the PA_CHECK below.
  size_t try_length = length + (align - PageAllocationGranularity());
  PA_CHECK(try_length >= length);
  void* ret;
  do {
    // Continue randomizing only on POSIX.
    address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
    ret = AllocPagesIncludingReserved(address, try_length, accessibility,
                                      page_tag);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret != nullptr && (ret = TrimMapping(ret, try_length, length, align,
                                                accessibility)) == nullptr);
  return ret;
}
// Unmaps [address, address + length), which must exactly match a prior
// allocation, and updates the global mapped-size counter.
void FreePages(void* address, size_t length) {
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
              PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
  FreePagesInternal(address, length);
  // The counter must not underflow: every free must match a prior alloc.
  PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
  g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
}
// Changes protection on [address, address + length) (system-page aligned).
// Returns false instead of crashing when the platform call fails.
bool TrySetSystemPagesAccess(void* address,
                             size_t length,
                             PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  return TrySetSystemPagesAccessInternal(address, length, accessibility);
}
// Changes protection on [address, address + length) (system-page aligned).
// The platform implementation crashes on failure.
void SetSystemPagesAccess(void* address,
                          size_t length,
                          PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  SetSystemPagesAccessInternal(address, length, accessibility);
}
// Decommits [address, address + length) (system-page aligned). Whether the
// pages are also made inaccessible depends on |accessibility_disposition|;
// see the contract in page_allocator.h.
void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  DecommitSystemPagesInternal(address, length, accessibility_disposition);
}
// Recommits previously decommitted pages with |accessibility| (which must not
// be PageInaccessible). Crashes on failure; see TryRecommitSystemPages() for
// the non-crashing variant.
void RecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageInaccessible);
  RecommitSystemPagesInternal(address, length, accessibility,
                              accessibility_disposition);
}
// Like RecommitSystemPages(), but reports failure to the caller instead of
// crashing.
bool TryRecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Duplicated because we want errors to be reported at a lower level in the
  // crashing case.
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageInaccessible);
  return TryRecommitSystemPagesInternal(address, length, accessibility,
                                        accessibility_disposition);
}
// Hints to the OS that [address, address + length) is no longer needed; see
// the detailed semantics in page_allocator.h.
void DiscardSystemPages(void* address, size_t length) {
  PA_DCHECK(!(length & SystemPageOffsetMask()));
  DiscardSystemPagesInternal(address, length);
}
// Makes the one-shot address-space reservation of |size| bytes. Returns true
// only when no reservation already existed and the allocation succeeded.
bool ReserveAddressSpace(size_t size) {
  // Only SystemAllocPages may be called with the lock held, to avoid
  // deadlock (FreePages and friends may themselves touch the reservation).
  AutoLock guard(GetReserveLock());
  if (s_reservation_address != nullptr)
    return false;
  void* mem =
      SystemAllocPages(nullptr, size, PageInaccessible, PageTag::kChromium);
  if (mem == nullptr)
    return false;
  // We guarantee this alignment when reserving address space.
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
              PageAllocationGranularityOffsetMask()));
  s_reservation_address = mem;
  s_reservation_size = size;
  return true;
}
// Frees the reserved address-space block, if one exists. Returns true when a
// reservation was actually released.
bool ReleaseReservation() {
  // Only FreePages may be called with the lock held, to avoid deadlock.
  AutoLock guard(GetReserveLock());
  if (s_reservation_address != nullptr) {
    FreePages(s_reservation_address, s_reservation_size);
    s_reservation_address = nullptr;
    s_reservation_size = 0;
    return true;
  }
  return false;
}
// Test-only: reports whether the global address-space reservation is held.
bool HasReservationForTesting() {
  AutoLock guard(GetReserveLock());
  return s_reservation_address != nullptr;
}
// Returns the platform error code recorded by the per-OS internals when the
// underlying allocation syscall last failed (errno / GetLastError / zx
// status, depending on platform).
uint32_t GetAllocPageErrorCode() {
  return s_allocPageErrorCode;
}
// Returns the total number of bytes currently mapped by this allocator across
// all clients (committed or not). This is a statistic only; read with relaxed
// ordering for consistency with the relaxed fetch_add/fetch_sub update sites
// in SystemAllocPages()/FreePages(), rather than the implicit (and needlessly
// sequentially-consistent) atomic-to-integer conversion.
size_t GetTotalMappedSize() {
  return g_total_mapped_address_space.load(std::memory_order_relaxed);
}
} // namespace base

View File

@ -0,0 +1,249 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#include <stdint.h>
#include <cstddef>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
// Page protection levels understood by the page allocator; translated into
// platform-specific protection flags by the per-OS internals headers.
// PageInaccessible also means "uncommitted" for Alloc/Decommit purposes.
enum PageAccessibilityConfiguration {
  PageInaccessible,
  PageRead,
  PageReadWrite,
  // This flag is mapped to PageReadWrite on systems that
  // don't support MTE.
  PageReadWriteTagged,
  PageReadExecute,
  // This flag is deprecated and will go away soon.
  // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
  PageReadWriteExecute,
};
// Use for De/RecommitSystemPages API. Controls whether those calls also
// update page permissions or leave them untouched where the OS permits.
enum PageAccessibilityDisposition {
  // Enforces permission update (Decommit will set to PageInaccessible;
  // Recommit will set to whatever was requested, other than PageInaccessible).
  PageUpdatePermissions,
  // Will not update permissions, if the platform supports that (POSIX & Fuchsia
  // only).
  PageKeepPermissionsIfPossible,
};
// macOS supports tagged memory regions, to help in debugging. On Android,
// these tags are used to name anonymous mappings. Values occupy the range
// [kFirst, kLast].
enum class PageTag {
  kFirst = 240,           // Minimum tag value.
  kBlinkGC = 252,         // Blink GC pages.
  kPartitionAlloc = 253,  // PartitionAlloc, no matter the partition.
  kChromium = 254,        // Chromium page.
  kV8 = 255,              // V8 heap pages.
  kLast = kV8             // Maximum tag value.
};
// Allocate one or more pages.
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned at least to |align| bytes.
// |length| is in bytes, and must be a multiple of
// |PageAllocationGranularity()|. |align| is in bytes, and must be a
// power-of-two multiple of |PageAllocationGranularity()|.
//
// If |address| is null, then a suitable and randomized address will be chosen
// automatically.
//
// |page_accessibility| controls the permission of the allocated pages.
// PageInaccessible means uncommitted.
//
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// This call will return null if the allocation cannot be satisfied.
BASE_EXPORT void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration page_accessibility,
PageTag tag);
// Free one or more pages starting at |address| and continuing for |length|
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
// |address| must be aligned to |PageAllocationGranularity()| bytes, and
// |length| must be a multiple of |PageAllocationGranularity()|.
BASE_EXPORT void FreePages(void* address, size_t length);
// Mark one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
BASE_EXPORT WARN_UNUSED_RESULT bool TrySetSystemPagesAccess(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Mark one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Performs a CHECK that the operation succeeds.
BASE_EXPORT void SetSystemPagesAccess(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility);
// Decommit one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// |accessibility_disposition| allows to specify whether the pages should be
// made inaccessible (PageUpdatePermissions), or left as is
// (PageKeepPermissionsIfPossible, POSIX & Fuchsia only). The latter should only
// be used as an optimization if you really know what you're doing.
// TODO(bartekn): Ideally, all callers should use PageUpdatePermissions,
// for better security, but that may lead to a perf regression. Tracked at
// http://crbug.com/766882.
//
// Decommitted means that physical resources (RAM or swap) backing the allocated
// virtual address range may be released back to the system, but the address
// space is still allocated to the process (possibly using up page table entries
// or other accounting resources). There is no guarantee that the pages are
// zeroed. Unless PageKeepPermissionsIfPossible disposition is used, any access
// to a decommitted region of memory is an error and will generate a fault.
//
// This operation is not atomic on all platforms.
//
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
// processes will not fault when touching a committed memory region. There is
// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
// best-effort allocated resources on the first touch. If PageUpdatePermissions
// disposition is used, this API behaves in a platform-agnostic way by
// simulating the Windows "decommit" state by both discarding the region
// (allowing the OS to avoid swap operations) *and* changing the page
// protections so accesses fault.
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void DecommitSystemPages(
void* address,
size_t length,
PageAccessibilityDisposition accessibility_disposition);
// Recommit one or more system pages, starting at |address| and continuing for
// |length| bytes with the given |page_accessibility| (must not be
// PageInaccessible). |address| and |length| must be aligned to a system page
// boundary.
//
// |accessibility_disposition| allows to specify whether the page permissions
// should be set to |page_accessibility| (PageUpdatePermissions), or left as is
// (PageKeepPermissionsIfPossible, POSIX & Fuchsia only). The latter can only be
// used if the pages were previously accessible and decommitted with
// PageKeepPermissionsIfPossible. It is ok, however, to recommit with
// PageUpdatePermissions even if pages were decommitted with
// PageKeepPermissionsIfPossible (merely losing an optimization).
//
// This operation is not atomic on all platforms.
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void RecommitSystemPages(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition);
// Like RecommitSystemPages(), but returns false instead of crashing.
BASE_EXPORT bool TryRecommitSystemPages(
void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility,
PageAccessibilityDisposition accessibility_disposition) WARN_UNUSED_RESULT;
// Discard one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
// - Do nothing.
// - Discard the page immediately, freeing up physical pages.
// - Discard the page at some time in the future in response to memory
// pressure.
//
// Only committed pages should be discarded. Discarding a page does not decommit
// it, and it is valid to discard an already-discarded page. A read or write to
// a discarded page will not fault.
//
// Reading from a discarded page may return the original page content, or a page
// full of zeroes.
//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0. NOTE(review): by modular arithmetic this also
// wraps to 0 for addresses within SystemPageOffsetMask() of UINTPTR_MAX.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
  return (address + SystemPageOffsetMask()) & SystemPageBaseMask();
}
// Rounds down |address| to the previous multiple of |SystemPageSize()|.
// Returns 0 for an |address| of 0 (already a multiple).
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
  return address & SystemPageBaseMask();
}
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0. NOTE(review): like RoundUpToSystemPage(),
// wraps modulo 2^N near the top of the address space.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
  return (address + PageAllocationGranularityOffsetMask()) &
         PageAllocationGranularityBaseMask();
}
// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
  return address & PageAllocationGranularityBaseMask();
}
// Reserves (at least) |size| bytes of address space, aligned to
// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already made.
BASE_EXPORT bool ReserveAddressSpace(size_t size);
// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
//
// Returns true when an existing reservation was released.
BASE_EXPORT bool ReleaseReservation();
// Returns true if there is currently an address space reservation.
BASE_EXPORT bool HasReservationForTesting();
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
BASE_EXPORT uint32_t GetAllocPageErrorCode();
// Returns the total amount of mapped pages from all clients of
// PageAllocator. These pages may or may not be committed. This is mostly useful
// to assess address space pressure.
BASE_EXPORT size_t GetTotalMappedSize();
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

View File

@ -0,0 +1,115 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#include <stddef.h>
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if defined(OS_APPLE)
#include <mach/vm_page_size.h>
// Although page allocator constants are not constexpr, they are run-time
// constant. Because the underlying variables they access, such as vm_page_size,
// are not marked const, the compiler normally has no way to know that they
// don't change and must obtain their values whenever it can't prove that they
// haven't been modified, even if they had already been obtained previously.
// Attaching __attribute__((const)) to these declarations allows these redundant
// accesses to be omitted under optimization such as common subexpression
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
#else
// When defined, page size constants are fixed at compile time. When not
// defined, they may vary at run time.
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
// Use this macro to declare a function as constexpr or not based on whether
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
#endif
namespace {
#if !defined(OS_APPLE)
// Log2 of the allocation granularity; compile-time constant on non-Apple
// platforms. On Windows this matches the 64kB VirtualAlloc allocation
// granularity rather than the 4kB page size.
constexpr ALWAYS_INLINE int PageAllocationGranularityShift() {
#if defined(OS_WIN) || defined(ARCH_CPU_PPC64)
  // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
  // sizes. Since 64kB is the de facto standard on the platform and binaries
  // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
  // here.
  return 16;  // 64kB
#elif defined(_MIPS_ARCH_LOONGSON)
  return 14;  // 16kB
#else
  return 12;  // 4kB
#endif
}
#endif
} // namespace
namespace base {
// Granularity (in bytes) at which AllocPages()/FreePages() operate. On Apple
// platforms this is a run-time value (vm_page_size), elsewhere a compile-time
// constant.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if defined(OS_APPLE)
  return vm_page_size;
#else
  return 1 << PageAllocationGranularityShift();
#endif
}
// Mask selecting the within-granule offset bits of an address.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
  return PageAllocationGranularity() - 1;
}
// Mask selecting the granule-aligned base bits of an address (complement of
// the offset mask).
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
  return ~PageAllocationGranularityOffsetMask();
}
// Size of a system page: the unit for protection changes and de/recommit.
// Always 4kB on Windows (distinct from the 64kB allocation granularity);
// equal to the allocation granularity elsewhere.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageSize() {
#if defined(OS_WIN)
  return 4096;
#else
  return PageAllocationGranularity();
#endif
}
// Mask selecting the within-page offset bits of an address.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageOffsetMask() {
  return SystemPageSize() - 1;
}
// Mask selecting the page-aligned base bits of an address.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageBaseMask() {
  return ~SystemPageOffsetMask();
}
// Per-partition-page metadata footprint: 2^5 = 32 bytes.
static constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
static constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
// See DecommitSystemPages(), this is not guaranteed to be synchronous on all
// platforms.
static constexpr bool kDecommittedPagesAreAlwaysZeroed =
#if defined(OS_APPLE)
    false;
#else
    true;
#endif
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

View File

@ -0,0 +1,17 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
namespace base {
void* SystemAllocPages(void* hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

View File

@ -0,0 +1,227 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file implements memory allocation primitives for PageAllocator using
// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
// object that corresponds to a set of memory pages. VMO pages may be mapped
// to an address space. The code below creates VMOs for each memory allocations
// and maps them to the default address space of the current process.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/notreached.h"
namespace base {
namespace {
// Maps a PageTag to the name attached to the VMOs created for it; these
// names are used only to label mappings for debugging.
const char* PageTagToName(PageTag tag) {
  if (tag == PageTag::kBlinkGC)
    return "cr_blink_gc";
  if (tag == PageTag::kPartitionAlloc)
    return "cr_partition_alloc";
  if (tag == PageTag::kChromium)
    return "cr_chromium";
  if (tag == PageTag::kV8)
    return "cr_v8";
  // Any other tag is a programming error.
  PA_DCHECK(false);
  return "";
}
// Translates a PageAccessibilityConfiguration into the zx_vm_option_t
// permission bits passed to zx_vmar_map()/protect().
zx_vm_option_t PageAccessibilityToZxVmOptions(
    PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageRead:
      return ZX_VM_PERM_READ;
    case PageReadWrite:
    // No memory-tagging permission bit here; tagged maps as plain RW.
    case PageReadWriteTagged:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    case PageReadExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
    case PageReadWriteExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
    default:
      NOTREACHED();
      FALLTHROUGH;
    case PageInaccessible:
      // No permission bits: the mapping is inaccessible.
      return 0;
  }
}
} // namespace
// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;
// Last zx_status_t from a failed map; surfaced via GetAllocPageErrorCode().
std::atomic<int32_t> s_allocPageErrorCode{0};
// Creates a VMO of |length| bytes, names it after |page_tag|, makes it
// executable-capable for V8 (JIT) pages, and maps it into the current
// process's root VMAR with the requested protections. A non-null |hint| is
// mandatory (ZX_VM_SPECIFIC), so mapping may fail benignly if that address
// range is already in use.
void* SystemAllocPagesInternal(void* hint,
                               size_t length,
                               PageAccessibilityConfiguration accessibility,
                               PageTag page_tag) {
  zx::vmo vmo;
  zx_status_t status = zx::vmo::create(length, 0, &vmo);
  if (status != ZX_OK) {
    ZX_DLOG(INFO, status) << "zx_vmo_create";
    return nullptr;
  }
  const char* vmo_name = PageTagToName(page_tag);
  status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));
  // VMO names are used only for debugging, so failure to set a name is not
  // fatal.
  ZX_DCHECK(status == ZX_OK, status);
  if (page_tag == PageTag::kV8) {
    // V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
    // in the new VMO.
    status = vmo.replace_as_executable(zx::resource(), &vmo);
    if (status != ZX_OK) {
      ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
      return nullptr;
    }
  }
  zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);
  uint64_t vmar_offset = 0;
  if (hint) {
    vmar_offset = reinterpret_cast<uint64_t>(hint);
    options |= ZX_VM_SPECIFIC;
  }
  uint64_t address;
  status =
      zx::vmar::root_self()->map(options, vmar_offset, vmo,
                                 /*vmo_offset=*/0, length, &address);
  if (status != ZX_OK) {
    // map() is expected to fail if |hint| is set to an already-in-use location.
    if (!hint) {
      ZX_DLOG(ERROR, status) << "zx_vmar_map";
    }
    return nullptr;
  }
  return reinterpret_cast<void*>(address);
}
// Unmaps the |pre_slack|-byte head and |post_slack|-byte tail of |base| so
// that only the aligned |trim_length| window remains mapped. Cannot fail
// (unmap errors crash), so always returns the trimmed base.
void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
  uint64_t base_address = reinterpret_cast<uint64_t>(base);
  // Unmap head if necessary.
  if (pre_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
    ZX_CHECK(status == ZX_OK, status);
  }
  // Unmap tail if necessary.
  if (post_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(
        base_address + pre_slack + trim_length, post_slack);
    ZX_CHECK(status == ZX_OK, status);
  }
  return reinterpret_cast<void*>(base_address + pre_slack);
}
// Changes page protections via zx_vmar_protect; reports failure to the
// caller instead of crashing.
bool TrySetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect2(
      PageAccessibilityToZxVmOptions(accessibility),
      reinterpret_cast<uint64_t>(address), length);
  return status == ZX_OK;
}
// Changes page protections via zx_vmar_protect; crashes on failure.
void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect2(
      PageAccessibilityToZxVmOptions(accessibility),
      reinterpret_cast<uint64_t>(address), length);
  ZX_CHECK(status == ZX_OK, status);
}
// Unmaps the range from the root VMAR; the backing VMO handle was already
// closed after mapping, so unmapping releases the pages.
void FreePagesInternal(void* address, size_t length) {
  uint64_t address_int = reinterpret_cast<uint64_t>(address);
  zx_status_t status = zx::vmar::root_self()->unmap(address_int, length);
  ZX_CHECK(status == ZX_OK, status);
}
// "Discards" by forcibly decommitting the pages (ZX_VMO_OP_DECOMMIT), which
// is stronger than the advisory semantics DiscardSystemPages() promises.
void DiscardSystemPagesInternal(void* address, size_t length) {
  // TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
  // forcibly de-committing them immediately, when Fuchsia supports it.
  uint64_t address_int = reinterpret_cast<uint64_t>(address);
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, length, nullptr, 0);
  ZX_CHECK(status == ZX_OK, status);
}
// Decommit = optionally drop permissions, then release the backing pages.
// See DecommitSystemPages() in page_allocator.h for the contract.
void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, PageInaccessible);
  }
  // TODO(https://crbug.com/1022062): Review whether this implementation is
  // still appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
  // discardable API.
  DiscardSystemPagesInternal(address, length);
}
// Recommit is implicit on Fuchsia (first touch re-commits); only the
// permissions may need restoring.
void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, accessibility);
  }
}
// Non-crashing variant of RecommitSystemPagesInternal(); only the permission
// change can fail, so that result is propagated.
bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    return TrySetSystemPagesAccess(address, length, accessibility);
  }
  return true;
}
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

View File

@ -0,0 +1,60 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/cpu.h"
#include "base/notreached.h"
#include <sys/mman.h>
// PROT_BTI requests a page that supports BTI landing pads.
#define PROT_BTI 0x10
// PROT_MTE requests a page that's suitable for memory tagging.
#define PROT_MTE 0x20
namespace base {
// Two helper functions to detect whether we can safely use PROT_BTI
// and PROT_MTE (static CPU triggers a -Wexit-time-destructors warning.)
// Reports whether the CPU supports ARM BTI (branch target identification);
// always false off the ARM family.
static bool HasCPUBranchIdentification() {
#if defined(ARCH_CPU_ARM_FAMILY)
  // CreateNoAllocation() avoids heap use so this is safe very early.
  CPU cpu = CPU::CreateNoAllocation();
  return cpu.has_bti();
#else
  return false;
#endif
}
// Reports whether the CPU supports ARM MTE (memory tagging extension);
// always false off the ARM family.
static bool HasCPUMemoryTaggingExtension() {
#if defined(ARCH_CPU_ARM_FAMILY)
  CPU cpu = CPU::CreateNoAllocation();
  return cpu.has_mte();
#else
  return false;
#endif
}
// Maps a PageAccessibilityConfiguration to mmap/mprotect PROT_* flags, adding
// PROT_BTI / PROT_MTE when the CPU supports those ARM extensions.
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  // Cached once: CPU features do not change at runtime, and function-local
  // statics avoid the -Wexit-time-destructors issue noted above.
  static const bool has_bti = HasCPUBranchIdentification();
  static const bool has_mte = HasCPUMemoryTaggingExtension();
  switch (accessibility) {
    case PageRead:
      return PROT_READ;
    case PageReadWrite:
      return PROT_READ | PROT_WRITE;
    case PageReadWriteTagged:
      return PROT_READ | PROT_WRITE | (has_mte ? PROT_MTE : 0u);
    case PageReadExecute:
      return PROT_READ | PROT_EXEC | (has_bti ? PROT_BTI : 0u);
    case PageReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    default:
      NOTREACHED();
      FALLTHROUGH;
    case PageInaccessible:
      return PROT_NONE;
  }
}
} // namespace base

View File

@ -0,0 +1,333 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#include <errno.h>
#include <sys/mman.h>
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check_op.h"
#include "base/cpu.h"
#include "base/notreached.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"
#if defined(OS_APPLE)
#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_cftyperef.h"
#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if defined(OS_ANDROID)
#include <sys/prctl.h>
#endif
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <sys/resource.h>
#include <algorithm>
#endif
#include "base/allocator/partition_allocator/page_allocator.h"
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(OS_APPLE)
// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it's
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task)
#if __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12
// When redeclaring something previously declared as unavailable, the
// weak_import attribute won't be applied unless manually set.
__attribute__((weak_import))
#endif // DT < 10.12
API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop
#endif // OS_APPLE
namespace base {
namespace {
#if defined(OS_ANDROID)
// Returns the string literal used to name an anonymous mapping created with
// the given |tag|.
const char* PageTagToName(PageTag tag) {
  // Important: every returned name must be a string literal. As per prctl.h
  // in //third_party/android_ndk, the kernel stores the pointer to the name
  // rather than copying the string, so the data has to stay in .rodata for
  // as long as the mapping is alive.
  if (tag == PageTag::kBlinkGC)
    return "blink_gc";
  if (tag == PageTag::kPartitionAlloc)
    return "partition_alloc";
  if (tag == PageTag::kChromium)
    return "chromium";
  if (tag == PageTag::kV8)
    return "v8";
  // Any other tag is a programming error.
  PA_DCHECK(false);
  return "";
}
#endif // defined(OS_ANDROID)
#if defined(OS_APPLE)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
  if (!mac::IsAtLeastOS10_14()) {
    // MAP_JIT existed before macOS 10.14, but had somewhat different
    // semantics. Only one MAP_JIT region was permitted per process, but
    // calling code here will very likely require more than one such region.
    // Since MAP_JIT is not strictly necessary to write code to a region and
    // then execute it on these older OSes, don't use it at all.
    return false;
  }
  // Until determining that the hardened runtime is enabled, early returns will
  // return true, so that MAP_JIT will be used. This is important on arm64,
  // which only allows pages to be simultaneously writable and executable when
  // in a region allocated with MAP_JIT, regardless of code signing options. On
  // arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
  // executable fails with EPERM. Although this is not enforced on x86_64,
  // MAP_JIT is harmless in that case.
  ScopedCFTypeRef<SecTaskRef> task(SecTaskCreateFromSelf(kCFAllocatorDefault));
  if (!task) {
    return true;
  }
  uint32_t flags = SecTaskGetCodeSignStatus(task);
  if (!(flags & kSecCodeSignatureRuntime)) {
    // The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
    // == CS_RUNTIME.
    return true;
  }
  // The hardened runtime is enabled. From this point on, early returns must
  // return false, indicating that MAP_JIT is not to be used. It's an error
  // (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
  // entitlement is specified.
  ScopedCFTypeRef<CFTypeRef> jit_entitlement(SecTaskCopyValueForEntitlement(
      task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
  if (!jit_entitlement)
    return false;
  return mac::CFCast<CFBooleanRef>(jit_entitlement.get()) == kCFBooleanTrue;
}
#endif // defined(OS_APPLE)
} // namespace
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
// Records the errno of the most recent failed mmap() call, for diagnostics.
std::atomic<int32_t> s_allocPageErrorCode{0};
// Forward declaration: translates |accessibility| into the protection flags
// passed to mmap()/mprotect() below.
int GetAccessFlags(PageAccessibilityConfiguration accessibility);
// Maps |length| bytes of anonymous, private memory near |hint| (advisory)
// with the protection implied by |accessibility|. Returns nullptr on failure
// and records errno in |s_allocPageErrorCode|.
void* SystemAllocPagesInternal(void* hint,
                               size_t length,
                               PageAccessibilityConfiguration accessibility,
                               PageTag page_tag) {
#if defined(OS_APPLE)
  // Use a custom tag to make it easier to distinguish Partition Alloc regions
  // in vmmap(1). Tags between 240-255 are supported.
  PA_DCHECK(PageTag::kFirst <= page_tag);
  PA_DCHECK(PageTag::kLast >= page_tag);
  int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
  int fd = -1;
#endif
  int access_flag = GetAccessFlags(accessibility);
  int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if defined(OS_APPLE)
  // On macOS 10.14 and higher, executables that are code signed with the
  // "runtime" option cannot execute writable memory by default. They can opt
  // into this capability by specifying the "com.apple.security.cs.allow-jit"
  // code signing entitlement and allocating the region with the MAP_JIT flag.
  static const bool kUseMapJit = UseMapJit();
  if (page_tag == PageTag::kV8 && kUseMapJit) {
    map_flags |= MAP_JIT;
  }
#endif
  void* ret = mmap(hint, length, access_flag, map_flags, fd, 0);
  if (ret == MAP_FAILED) {
    // Preserve the error for later reporting; callers only see nullptr.
    s_allocPageErrorCode = errno;
    ret = nullptr;
  }
#if defined(OS_ANDROID)
  // On Android, anonymous mappings can have a name attached to them. This is
  // useful for debugging, and double-checking memory attribution.
  if (ret) {
    // No error checking on purpose, testing only.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
          PageTagToName(page_tag));
  }
#endif
  return ret;
}
bool TrySetSystemPagesAccessInternal(
void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
return 0 ==
HANDLE_EINTR(mprotect(address, length, GetAccessFlags(accessibility)));
}
// Changes the protection of [address, address + length), crashing on failure.
// An ENOMEM failure while making pages writable is reported as an OOM crash,
// since it almost certainly means the sandbox's address-space limit was hit.
void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  int access_flags = GetAccessFlags(accessibility);
  const int ret = HANDLE_EINTR(mprotect(address, length, access_flags));
  // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
  // kernel data structures cannot be allocated, (2) the address range is
  // invalid, or (3) this would split an existing mapping in a way that would
  // exceed the maximum number of allowed mappings.
  //
  // Neither are very likely, but we still get a lot of crashes here. This is
  // because setrlimit(RLIMIT_DATA)'s limit is checked and enforced here, if the
  // access flags match a "data" mapping, which in our case would be MAP_PRIVATE
  // | MAP_ANONYMOUS, and PROT_WRITE. see the call to may_expand_vm() in
  // mm/mprotect.c in the kernel for details.
  //
  // In this case, we are almost certainly bumping into the sandbox limit, mark
  // the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
    OOM_CRASH(length);
  PA_PCHECK(0 == ret);
}
// Returns [address, address + length) to the system. A failed munmap()
// indicates corrupted bookkeeping, so it crashes with the errno recorded.
void FreePagesInternal(void* address, size_t length) {
  const int result = munmap(address, length);
  PA_PCHECK(result == 0);
}
// Shrinks the mapping starting at |base| down to |trim_length| bytes by
// releasing |pre_slack| bytes at the front and |post_slack| bytes at the back.
// Returns the start of the trimmed (aligned) range.
void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  // On POSIX an allocation run can be resized in place: simply unmap the
  // unneeded memory before and after the aligned range.
  char* start = reinterpret_cast<char*>(base);
  if (pre_slack) {
    FreePages(start, pre_slack);
    start += pre_slack;
  }
  if (post_slack) {
    FreePages(start + trim_length, post_slack);
  }
  return start;
}
// Emulates Windows-style decommit on POSIX: discard the page contents, then
// optionally drop access. The order of the two calls is deliberate.
void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // In POSIX, there is no decommit concept. Discarding is an effective way of
  // implementing the Windows semantics where the OS is allowed to not swap the
  // pages in the region.
  DiscardSystemPages(address, length);
  // Make pages inaccessible, unless the caller requested to keep permissions.
  //
  // Note, there is a small window between these calls when the pages can be
  // incorrectly touched and brought back to memory. Not ideal, but doing those
  // operations in the opposite order resulted in PMF regression on Mac (see
  // crbug.com/1153021).
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, PageInaccessible);
  }
}
// Makes previously decommitted pages usable again. Crashes if permissions
// cannot be restored (via SetSystemPagesAccess).
void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, accessibility);
  }
#if defined(OS_APPLE)
  // On macOS, to update accounting, we need to make another syscall. For more
  // details, see https://crbug.com/823915.
  madvise(address, length, MADV_FREE_REUSE);
#endif
}
// Same as RecommitSystemPagesInternal(), but reports failure to the caller
// instead of crashing when the permissions cannot be restored.
bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX, recommit is implicit: touching the memory is enough. The only
  // real work is restoring permissions if decommit changed them.
  if (accessibility_disposition == PageUpdatePermissions &&
      !TrySetSystemPagesAccess(address, length, accessibility)) {
    return false;
  }
#if defined(OS_APPLE)
  // macOS needs an extra syscall to keep its memory accounting up to date.
  // For more details, see https://crbug.com/823915.
  madvise(address, length, MADV_FREE_REUSE);
#endif
  return true;
}
// Tells the OS that the contents of [address, address + length) are no longer
// needed, without unmapping the range or changing its protection.
void DiscardSystemPagesInternal(void* address, size_t length) {
#if defined(OS_APPLE)
  int ret = madvise(address, length, MADV_FREE_REUSABLE);
  if (ret) {
    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
    ret = madvise(address, length, MADV_DONTNEED);
  }
  PA_PCHECK(ret == 0);
#else
  // We have experimented with other flags, but with suboptimal results.
  //
  // MADV_FREE (Linux): Makes our memory measurements less predictable;
  // performance benefits unclear.
  //
  // Therefore, we just do the simple thing: MADV_DONTNEED.
  PA_PCHECK(0 == madvise(address, length, MADV_DONTNEED));
#endif
}
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

View File

@ -0,0 +1,164 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/notreached.h"
namespace base {
// |VirtualAlloc| will fail if allocation at the hint address is blocked.
constexpr bool kHintIsAdvisory = false;
// Records the Windows error code of the most recent failed VirtualAlloc().
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
// Translates a PageAccessibilityConfiguration into the corresponding PAGE_*
// protection constant used by VirtualAlloc().
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageRead:
      return PAGE_READONLY;
    case PageReadWrite:
    case PageReadWriteTagged:
      return PAGE_READWRITE;
    case PageReadExecute:
      return PAGE_EXECUTE_READ;
    case PageReadWriteExecute:
      return PAGE_EXECUTE_READWRITE;
    case PageInaccessible:
      return PAGE_NOACCESS;
    default:
      // Unknown configurations are a bug; treat them as no-access.
      NOTREACHED();
      return PAGE_NOACCESS;
  }
}
void* SystemAllocPagesInternal(void* hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
DWORD access_flag = GetAccessFlags(accessibility);
const DWORD type_flags = (accessibility != PageInaccessible)
? (MEM_RESERVE | MEM_COMMIT)
: MEM_RESERVE;
void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
if (ret == nullptr) {
s_allocPageErrorCode = GetLastError();
}
return ret;
}
// Windows cannot shrink an allocation in place. If trimming is needed, the
// whole region is released and re-allocated at the aligned address inside the
// old range. NOTE(review): this is inherently racy — another thread could
// claim the freed range before the re-allocation; presumably callers tolerate
// a null return here — confirm.
void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  void* ret = base;
  if (pre_slack || post_slack) {
    // We cannot resize the allocation run. Free it and retry at the aligned
    // address within the freed range.
    ret = reinterpret_cast<char*>(base) + pre_slack;
    FreePages(base, base_length);
    ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
  }
  return ret;
}
// Attempts to change page accessibility, returning false on failure. Making
// pages inaccessible is implemented by decommitting them; anything else is a
// (re)commit with the requested protection.
bool TrySetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  if (accessibility == PageInaccessible) {
    return VirtualFree(address, length, MEM_DECOMMIT) != 0;
  }
  void* committed =
      VirtualAlloc(address, length, MEM_COMMIT, GetAccessFlags(accessibility));
  return committed != nullptr;
}
// Changes page accessibility, crashing on failure. Running into the system
// commit limit is reported as an OOM crash; other failures are CHECK failures
// that surface the Windows error code in the crash report.
void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  if (accessibility == PageInaccessible) {
    if (!VirtualFree(address, length, MEM_DECOMMIT)) {
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
    }
  } else {
    if (!VirtualAlloc(address, length, MEM_COMMIT,
                      GetAccessFlags(accessibility))) {
      int32_t error = GetLastError();
      if (error == ERROR_COMMITMENT_LIMIT)
        OOM_CRASH(length);
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(ERROR_SUCCESS == error);
    }
  }
}
void FreePagesInternal(void* address, size_t length) {
PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
}
void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible: SetSystemPagesAccess() issues MEM_DECOMMIT for
  // PageInaccessible (see above).
  SetSystemPagesAccess(address, length, PageInaccessible);
}
void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // accessibility_disposition is ignored: on Windows decommitted pages are
  // always inaccessible (see DecommitSystemPagesInternal above), so the
  // desired protection always has to be restored, which recommits the pages
  // via MEM_COMMIT as a side effect.
  SetSystemPagesAccess(address, length, accessibility);
}
// Like RecommitSystemPagesInternal(), but reports failure instead of crashing.
bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // accessibility_disposition is ignored: decommitted pages are always
  // inaccessible here, so restoring the protection (which recommits) is
  // always required.
  return TrySetSystemPagesAccess(address, length, accessibility);
}
// Hints to the OS that the contents of the given pages are no longer needed.
// Prefers DiscardVirtualMemory, looked up dynamically from Kernel32
// (presumably because it is absent on older Windows versions — confirm), and
// falls back to VirtualAlloc(MEM_RESET).
void DiscardSystemPagesInternal(void* address, size_t length) {
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  using DiscardVirtualMemoryFunction =
      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
  // -1 is the "not yet looked up" sentinel; after the lookup, nullptr means
  // the API is unavailable on this system.
  static DiscardVirtualMemoryFunction discard_virtual_memory =
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
  if (discard_virtual_memory ==
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
    discard_virtual_memory =
        reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
            GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = 1;
  if (discard_virtual_memory) {
    ret = discard_virtual_memory(address, length);
  }
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    PA_CHECK(ptr);
  }
}
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_

View File

@ -0,0 +1,97 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/bits.h"
namespace base {
namespace internal {
#if defined(PA_HAS_64_BITS_POINTERS)
// Static storage for the reserved GigaCage layout; see
// partition_address_space.h for the address-space diagram.
uintptr_t PartitionAddressSpace::reserved_base_address_ = 0;
// Before PartitionAddressSpace::Init(), no allocations are made from a
// reserved address space. Therefore, set *_pool_base_address_ initially to
// k*PoolOffsetMask, so that PartitionAddressSpace::IsIn*Pool() always returns
// false.
uintptr_t PartitionAddressSpace::direct_map_pool_base_address_ =
    kDirectMapPoolOffsetMask;
uintptr_t PartitionAddressSpace::normal_bucket_pool_base_address_ =
    kNormalBucketPoolOffsetMask;
// 0 is the "no pool" value; real handles are assigned in Init().
pool_handle PartitionAddressSpace::direct_map_pool_ = 0;
pool_handle PartitionAddressSpace::normal_bucket_pool_ = 0;
// One-time setup: reserves a single contiguous, suitably aligned,
// inaccessible region and carves it into the direct map pool followed by the
// normal bucket pool. Idempotent.
void PartitionAddressSpace::Init() {
  if (IsInitialized())
    return;
  // Pages are reserved inaccessible; sub-allocations commit what they need.
  reserved_base_address_ = reinterpret_cast<uintptr_t>(AllocPages(
      nullptr, kDesiredAddressSpaceSize, kReservedAddressSpaceAlignment,
      base::PageInaccessible, PageTag::kPartitionAlloc));
  PA_CHECK(reserved_base_address_);
  uintptr_t current = reserved_base_address_;
  // First pool: direct map.
  direct_map_pool_base_address_ = current;
  direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
      current, kDirectMapPoolSize);
  PA_DCHECK(direct_map_pool_);
  // Sanity-check the membership predicate at both pool boundaries.
  PA_DCHECK(!IsInDirectMapPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(IsInDirectMapPool(reinterpret_cast<void*>(current)));
  current += kDirectMapPoolSize;
  PA_DCHECK(IsInDirectMapPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(!IsInDirectMapPool(reinterpret_cast<void*>(current)));
  // Second pool: normal buckets, immediately after the direct map pool.
  normal_bucket_pool_base_address_ = current;
  normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
      current, kNormalBucketPoolSize);
  PA_DCHECK(normal_bucket_pool_);
  PA_DCHECK(!IsInNormalBucketPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(IsInNormalBucketPool(reinterpret_cast<void*>(current)));
  current += kNormalBucketPoolSize;
  PA_DCHECK(IsInNormalBucketPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(!IsInNormalBucketPool(reinterpret_cast<void*>(current)));
#if PA_ALLOW_PCSCAN
  // Reserve memory for PCScan quarantine card table.
  void* requested_address =
      reinterpret_cast<void*>(normal_bucket_pool_base_address_);
  char* actual_address = internal::AddressPoolManager::GetInstance()->Reserve(
      normal_bucket_pool_, requested_address, kSuperPageSize);
  PA_CHECK(requested_address == actual_address)
      << "QuarantineCardTable is required to be allocated in the beginning of "
         "the NormalBucketPool";
  SetSystemPagesAccess(actual_address, kSuperPageSize, PageReadWrite);
  // Don't take physical memory since PCScan may be switched off.
  DiscardSystemPages(actual_address, kSuperPageSize);
#endif
  PA_DCHECK(reserved_base_address_ + kDesiredAddressSpaceSize == current);
}
// Test-only teardown: releases the reservation and restores every static to
// its pre-Init() value so Init() can run again.
void PartitionAddressSpace::UninitForTesting() {
  // NOTE(review): this frees kReservedAddressSpaceAlignment bytes rather than
  // kDesiredAddressSpaceSize — presumably matching what AllocPages actually
  // mapped for the aligned reservation; confirm against AllocPages.
  FreePages(reinterpret_cast<void*>(reserved_base_address_),
            kReservedAddressSpaceAlignment);
  reserved_base_address_ = 0;
  direct_map_pool_base_address_ = kDirectMapPoolOffsetMask;
  normal_bucket_pool_base_address_ = kNormalBucketPoolOffsetMask;
  direct_map_pool_ = 0;
  normal_bucket_pool_ = 0;
  internal::AddressPoolManager::GetInstance()->ResetForTesting();
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
} // namespace internal
} // namespace base

View File

@ -0,0 +1,181 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#include <algorithm>
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/notreached.h"
#include "base/partition_alloc_buildflags.h"
#include "build/build_config.h"
#include "build/buildflag.h"
namespace base {
namespace internal {
// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)
// Reserves address space for PartitionAllocator.
class BASE_EXPORT PartitionAddressSpace {
 public:
  static ALWAYS_INLINE constexpr uintptr_t NormalBucketPoolBaseMask() {
    return kNormalBucketPoolBaseMask;
  }
  static ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
    return direct_map_pool_;
  }
  static ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
    return normal_bucket_pool_;
  }
  // Reserves the address space and registers both pools. Idempotent.
  static void Init();
  static void UninitForTesting();
  // True once Init() has run. The base address and both pool handles are set
  // and cleared together, which the DCHECKs below verify.
  static ALWAYS_INLINE bool IsInitialized() {
    if (reserved_base_address_) {
      PA_DCHECK(direct_map_pool_ != 0);
      PA_DCHECK(normal_bucket_pool_ != 0);
      return true;
    }
    PA_DCHECK(direct_map_pool_ == 0);
    PA_DCHECK(normal_bucket_pool_ == 0);
    return false;
  }
  // Pool membership tests: each pool is aligned on its own (power-of-two)
  // size, so masking off the offset bits must yield the pool's base address.
  static ALWAYS_INLINE bool IsInDirectMapPool(const void* address) {
    return (reinterpret_cast<uintptr_t>(address) & kDirectMapPoolBaseMask) ==
           direct_map_pool_base_address_;
  }
  static ALWAYS_INLINE bool IsInNormalBucketPool(const void* address) {
    return (reinterpret_cast<uintptr_t>(address) & kNormalBucketPoolBaseMask) ==
           normal_bucket_pool_base_address_;
  }
  static ALWAYS_INLINE uintptr_t NormalBucketPoolBase() {
    return normal_bucket_pool_base_address_;
  }
  // PartitionAddressSpace is a static-only class; it must never be
  // instantiated or heap-allocated.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;
 private:
  // Partition Alloc Address Space
  // Reserves 32GiB address space for one direct map pool and one normal bucket
  // pool, 16GiB each.
  // TODO(bartekn): Look into devices with 39-bit address space that have 256GiB
  // user-mode space. Libraries loaded at random addresses may stand in the way
  // of reserving a contiguous 48GiB region (even though we're requesting only
  // 32GiB, AllocPages may under the covers reserve extra 16GiB to satisfy the
  // alignment requirements).
  //
  // +----------------+ reserved_base_address_ (16GiB aligned)
  // |  direct map    | == direct_map_pool_base_address_
  // |     pool       |
  // +----------------+ reserved_base_address_ + 16GiB
  // | normal bucket  | == normal_bucket_pool_base_address_
  // |     pool       |
  // +----------------+ reserved_base_address_ + 32GiB
  //
  // NOTE! On 64-bit systems with BackupRefPtr enabled, the direct map pool must
  // precede normal bucket pool. This is to prevent a pointer immediately past a
  // non-GigaCage allocation from falling into the normal bucket pool, thus
  // triggering BackupRefPtr mechanism and likely crashing.
  static constexpr size_t kGigaBytes = 1024 * 1024 * 1024;
  // Pool sizes are flexible, as long as each pool is aligned on its own size
  // boundary and the size is a power of two. The entire region is aligned on
  // the max pool size boundary, so the further pools only need to care about
  // the shift from the beginning of the region (for clarity, the pool sizes are
  // declared in the order the pools are allocated).
  //
  // For example, [16GiB,8GiB] would work, but [8GiB,16GiB] wouldn't (the 2nd
  // pool is aligned on 8GiB but needs 16GiB), and [8GiB,8GiB,16GiB,1GiB] would.
  static constexpr size_t kDirectMapPoolSize = 16 * kGigaBytes;
  static constexpr size_t kNormalBucketPoolSize = 16 * kGigaBytes;
  static constexpr size_t kDesiredAddressSpaceSize =
      kDirectMapPoolSize + kNormalBucketPoolSize;
  static constexpr size_t kReservedAddressSpaceAlignment =
      std::max(kDirectMapPoolSize, kNormalBucketPoolSize);
  static_assert(bits::IsPowerOfTwo(kDirectMapPoolSize) &&
                    bits::IsPowerOfTwo(kNormalBucketPoolSize),
                "Each pool size should be a power of two.");
  static_assert(bits::IsPowerOfTwo(kReservedAddressSpaceAlignment),
                "kReservedAddressSpaceAlignment should be a power of two.");
  static_assert(kReservedAddressSpaceAlignment >= kDirectMapPoolSize &&
                    kReservedAddressSpaceAlignment >= kNormalBucketPoolSize,
                "kReservedAddressSpaceAlignment should be larger or equal to "
                "each pool size.");
  static_assert(kReservedAddressSpaceAlignment % kDirectMapPoolSize == 0 &&
                    (kReservedAddressSpaceAlignment + kDirectMapPoolSize) %
                            kNormalBucketPoolSize ==
                        0,
                "Each pool should be aligned to its own size");
  // Masks used to easily determine whether an address belongs to a pool.
  static constexpr uintptr_t kDirectMapPoolOffsetMask =
      static_cast<uintptr_t>(kDirectMapPoolSize) - 1;
  static constexpr uintptr_t kDirectMapPoolBaseMask = ~kDirectMapPoolOffsetMask;
  static constexpr uintptr_t kNormalBucketPoolOffsetMask =
      static_cast<uintptr_t>(kNormalBucketPoolSize) - 1;
  static constexpr uintptr_t kNormalBucketPoolBaseMask =
      ~kNormalBucketPoolOffsetMask;
  // See the comment describing the address layout above.
  static uintptr_t reserved_base_address_;
  static uintptr_t direct_map_pool_base_address_;
  static uintptr_t normal_bucket_pool_base_address_;
  static internal::pool_handle direct_map_pool_;
  static internal::pool_handle normal_bucket_pool_;
};
// Convenience accessor for the direct map pool handle.
ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
  // This file is included from checked_ptr.h. This will result in a cycle if it
  // includes partition_alloc_features.h where IsPartitionAllocGigaCageEnabled
  // resides, because it includes Finch headers which may include checked_ptr.h.
  // TODO(bartekn): Uncomment once Finch is no longer used there.
  // PA_DCHECK(IsPartitionAllocGigaCageEnabled());
  return PartitionAddressSpace::GetDirectMapPool();
}
// Convenience accessor for the normal bucket pool handle.
ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
  // TODO(bartekn): Uncomment once Finch is no longer used there (see above).
  // PA_DCHECK(IsPartitionAllocGigaCageEnabled());
  return PartitionAddressSpace::GetNormalBucketPool();
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
} // namespace internal
#if defined(PA_HAS_64_BITS_POINTERS)
// Returns true iff |address| falls within the GigaCage direct map pool.
ALWAYS_INLINE bool IsManagedByPartitionAllocDirectMap(const void* address) {
  return internal::PartitionAddressSpace::IsInDirectMapPool(address);
}
// Returns true iff |address| falls within the GigaCage normal bucket pool.
ALWAYS_INLINE bool IsManagedByPartitionAllocNormalBuckets(const void* address) {
  return internal::PartitionAddressSpace::IsInNormalBucketPool(address);
}
#endif
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

View File

@ -0,0 +1,62 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#include <cstring>
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/partition_alloc_buildflags.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <windows.h>
#endif
#define PARTITION_EXTRAS_REQUIRED \
(DCHECK_IS_ON() || BUILDFLAG(USE_BACKUP_REF_PTR))
namespace base {
namespace internal {
// This is a `memset` that resists being optimized away. Adapted from
// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
// depend on //third_party, and this is small enough.)
// Zeroes |size| bytes at |p| in a way the optimizer cannot elide.
ALWAYS_INLINE void SecureZero(void* p, size_t size) {
#if defined(OS_WIN)
  // SecureZeroMemory is the OS-provided "cannot be optimized away" memset.
  SecureZeroMemory(p, size);
#else
  memset(p, 0, size);
  // As best as we can tell, this is sufficient to break any optimisations that
  // might try to eliminate "superfluous" memsets. If there's an easy way to
  // detect memset_s, it would be better to use that.
  __asm__ __volatile__("" : : "r"(p) : "memory");
#endif
}
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics.
#if !DCHECK_IS_ON()
// Returns true exactly once per period, where each period's length is drawn
// from base::RandomValue() truncated to 8 bits. The counter is thread-local,
// so periods are independent across threads.
ALWAYS_INLINE bool RandomPeriod() {
  static thread_local uint8_t remaining = 0;
  if (UNLIKELY(remaining == 0)) {
    // Truncating the random value to 8 bits is fine here.
    remaining = static_cast<uint8_t>(base::RandomValue());
  }
  // May wrap when the drawn value was 0; that is intentional and harmless.
  --remaining;
  return remaining == 0;
}
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

View File

@ -0,0 +1,122 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc.h"
#include <string.h>
#include <memory>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/pcscan.h"
#include "base/partition_alloc_buildflags.h"
namespace base {
// Validates the size/alignment invariants the allocator relies on and
// installs |on_out_of_memory| as the process-wide OOM handler.
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
  // This is from page_allocator_constants.h and doesn't really fit here, but
  // there isn't a centralized initialization function in page_allocator.cc, so
  // there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
  STATIC_ASSERT_OR_PA_CHECK((SystemPageSize() & (SystemPageSize() - 1)) == 0,
                            "SystemPageSize() must be power of 2");
  // Two partition pages are used as guard / metadata page so make sure the
  // super page size is bigger.
  STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() * 4 <= kSuperPageSize,
                            "ok super page size");
  STATIC_ASSERT_OR_PA_CHECK(!(kSuperPageSize % PartitionPageSize()),
                            "ok super page multiple");
  // Four system pages gives us room to hack out a still-guard-paged piece
  // of metadata in the middle of a guard partition page.
  STATIC_ASSERT_OR_PA_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
                            "ok partition page size");
  STATIC_ASSERT_OR_PA_CHECK(!(PartitionPageSize() % SystemPageSize()),
                            "ok partition page multiple");
  static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
                    kPageMetadataSize,
                "PartitionPage should not be too big");
  STATIC_ASSERT_OR_PA_CHECK(
      kPageMetadataSize * NumPartitionPagesPerSuperPage() <= SystemPageSize(),
      "page metadata fits in hole");
  // Limit to prevent callers accidentally overflowing an int size.
  STATIC_ASSERT_OR_PA_CHECK(
      MaxDirectMapped() <= (1UL << 31) + PageAllocationGranularity(),
      "maximum direct mapped allocation");
  // Check that some of our zanier calculations worked out as expected.
  static_assert(kSmallestBucket == kAlignment, "generic smallest bucket");
  static_assert(kMaxBucketed == 983040, "generic max bucketed");
  STATIC_ASSERT_OR_PA_CHECK(
      MaxSystemPagesPerSlotSpan() < (1 << 8),
      "System pages per slot span must be less than 128.");
  PA_DCHECK(on_out_of_memory);
  internal::g_oom_handling_function = on_out_of_memory;
}
// Test-only teardown: tears down the GigaCage (or resets the address pool
// manager on 32-bit), clears PCScan roots, and removes the OOM handler.
void PartitionAllocGlobalUninitForTesting() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  if (features::IsPartitionAllocGigaCageEnabled()) {
#if defined(PA_HAS_64_BITS_POINTERS)
    internal::PartitionAddressSpace::UninitForTesting();
#else
    internal::AddressPoolManager::GetInstance()->ResetForTesting();
#endif  // defined(PA_HAS_64_BITS_POINTERS)
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  internal::PCScan<internal::ThreadSafe>::Instance()
      .ClearRootsForTesting();  // IN-TEST
  internal::g_oom_handling_function = nullptr;
}
namespace internal {
// Unregisters the wrapped root from the global memory reclaimer before the
// root is destroyed.
template <bool thread_safe>
PartitionAllocator<thread_safe>::~PartitionAllocator() {
  PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
      &partition_root_);
}
// Initializes the wrapped root with |opts|, configures lazy commit, and
// registers the root with the global memory reclaimer.
template <bool thread_safe>
void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Per the CHECK message: a thread cache cannot be used when PartitionAlloc
  // serves as malloc().
  PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
      << "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
  partition_root_.Init(opts);
  partition_root_.ConfigureLazyCommit();
  PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
      &partition_root_);
}
// Explicit instantiations for the two supported thread-safety modes, so the
// template member definitions above can stay in this .cc file.
template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);
template PartitionAllocator<internal::NotThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::NotThreadSafe>::init(
    PartitionOptions);
#if DCHECK_IS_ON() && BUILDFLAG(USE_BACKUP_REF_PTR)
// Debug-only helper: asserts that |ptr| points at the start of a slot.
void DCheckGetSlotOffsetIsZero(void* ptr) {
  // Add kPartitionPastAllocationAdjustment, because PartitionAllocGetSlotStart
  // will subtract it.
  PA_DCHECK(PartitionAllocGetSlotStart(reinterpret_cast<char*>(ptr) +
                                       kPartitionPastAllocationAdjustment) ==
            ptr);
}
#endif
} // namespace internal
} // namespace base

View File

@ -0,0 +1,96 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// ABOUT
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
// - Just two single (reasonably predictable) branches in the hot / fast path
// for both allocating and (significantly) freeing.
// - A minimal number of operations in the hot / fast path, with the slow paths
// in separate functions, leading to the possibility of inlining.
// - Each partition page (which is usually multiple physical pages) has a
// metadata structure which allows fast mapping of free() address to an
// underlying bucket.
// - Supports a lock-free API for fast performance in single-threaded cases.
// - The freelist for a given bucket is split across a number of partition
// pages, enabling various simple tricks to try and minimize fragmentation.
// - Fine-grained bucket sizes leading to less waste and better packing.
//
// The following security properties could be investigated in the future:
// - Per-object bucketing (instead of per-size) is mostly available at the API,
// but not used yet.
// - No randomness of freelist entries or bucket position.
// - Better checking for wild pointers in free().
// - Better freelist masking function to guarantee fault on 32-bit.
#include <limits.h>
#include <string.h>
#include <memory>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/pcscan.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/notreached.h"
#include "base/partition_alloc_buildflags.h"
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
#include "build/buildflag.h"
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif // defined(ADDRESS_SANITIZER)
namespace base {
BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
namespace internal {
// Owns a PartitionRoot and manages its registration with the global memory
// reclaimer: init() registers the partition and the destructor unregisters
// it (see partition_alloc.cc). |thread_safe| selects whether the underlying
// PartitionRoot locks; use the ThreadSafe/NotThreadSafe aliases below.
template <bool thread_safe>
struct BASE_EXPORT PartitionAllocator {
  PartitionAllocator() = default;
  ~PartitionAllocator();
  // Must be called before any allocation is made through root().
  void init(PartitionOptions);
  // Accessors for the owned root; the pointer remains valid for the lifetime
  // of this object.
  ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
  ALWAYS_INLINE const PartitionRoot<thread_safe>* root() const {
    return &partition_root_;
  }
 private:
  PartitionRoot<thread_safe> partition_root_;
};
} // namespace internal
using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
using ThreadUnsafePartitionAllocator =
internal::PartitionAllocator<internal::NotThreadSafe>;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_

View File

@ -0,0 +1,83 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#include <errno.h>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/check.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h"
// PA_STRINGIFY(x) macro-expands x first (so e.g. __LINE__ becomes the line
// number) and then turns the result into a string literal. The extra level
// of indirection through PA_STRINGIFY_IMPL is what forces the expansion.
#define PA_STRINGIFY_IMPL(x) #x
#define PA_STRINGIFY(x) PA_STRINGIFY_IMPL(x)
// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. When an assertion is
// triggered, they format strings, leading to reentrancy in the code, which none
// of PartitionAlloc is designed to support (and especially not for error
// paths).
//
// As a consequence:
// - When PartitionAlloc is not malloc(), use the regular macros
// - Otherwise, crash immediately. This provides worse error messages though.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// For official build discard log strings to reduce binary bloat.
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
// See base/check.h for implementation details.
#define PA_CHECK(condition) \
UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
#else
// PartitionAlloc uses async-signal-safe RawCheck() for error reporting.
// Async-signal-safe functions are guaranteed to not allocate as otherwise they
// could operate with inconsistent allocator state.
#define PA_CHECK(condition) \
UNLIKELY(!(condition)) \
? logging::RawCheck( \
__FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
: EAT_CHECK_STREAM_PARAMS()
#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG)
#if DCHECK_IS_ON()
#define PA_DCHECK(condition) PA_CHECK(condition)
#else
#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
#endif // DCHECK_IS_ON()
// PA_PCHECK: like PCHECK(), but async-signal-safe and allocation-free.
// Captures errno into an aliased local so it survives into the crash dump,
// then crashes immediately. No message is logged and no streaming is
// supported in this configuration.
//
// Wrapped in do { } while (false) so the macro expands to a single
// statement: the previous bare `if { ... }` form was a dangling-else
// hazard (`if (a) PA_PCHECK(b); else ...` failed to compile).
#define PA_PCHECK(condition)        \
  do {                              \
    if (!(condition)) {             \
      int error = errno;            \
      base::debug::Alias(&error);   \
      IMMEDIATE_CRASH();            \
    }                               \
  } while (false)
#else
#define PA_CHECK(condition) CHECK(condition)
#define PA_DCHECK(condition) DCHECK(condition)
#define PA_PCHECK(condition) PCHECK(condition)
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
// Use this macro to assert on things that are conditionally constexpr as
// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time, this
// is a static_assert. Where determined at run time, this is a PA_CHECK.
// Therefore, this macro must only be used where both a static_assert and a
// PA_CHECK would be viable, that is, within a function, and ideally a function
// that executes only once, early in the program, such as during initialization.
#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
static_assert(condition, message)
#else
#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
do { \
PA_CHECK(condition) << (message); \
} while (false)
#endif
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_

View File

@ -0,0 +1,289 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include <limits.h>
#include <cstddef>
#include <algorithm>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "build/build_config.h"
#if defined(OS_APPLE)
#include <mach/vm_page_size.h>
#endif
namespace base {
// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
// address space. The only known case where address space is 32-bit is NaCl, so
// eliminate it explicitly. static_assert below ensures that others won't slip
// through.
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
#define PA_HAS_64_BITS_POINTERS
static_assert(sizeof(void*) == 8, "");
#else
static_assert(sizeof(void*) != 8, "");
#endif
// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
//
// *Super pages* are the underlying system allocations we make. Super pages
// contain multiple partition pages and include space for a small amount of
// metadata per partition page.
//
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
// of one or more `PartitionPage`s that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64 KiB and
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page.
// Shift (log2) of the `PartitionPage` size. Platform-specific so that a
// partition page covers a whole number of system pages (see
// NumSystemPagesPerPartitionPage() below).
#if defined(_MIPS_ARCH_LOONGSON)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
PartitionPageShift() {
  return 16;  // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif defined(OS_APPLE)
// vm_page_shift is the runtime system page shift (from <mach/vm_page_size.h>,
// included above), so a partition page is always 4 system pages here.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
PartitionPageShift() {
  return vm_page_shift + 2;
}
#else
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
PartitionPageShift() {
  return 14;  // 16 KiB
}
#endif
// Size, in bytes, of a `PartitionPage` (a power of two).
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageSize() {
  return 1 << PartitionPageShift();
}
// Mask extracting the offset of a pointer within its partition page.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageOffsetMask() {
  return PartitionPageSize() - 1;
}
// Mask extracting the (aligned) base address of the partition page
// containing a pointer.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageBaseMask() {
  return ~PartitionPageOffsetMask();
}
// TODO: Should this be 1 if defined(_MIPS_ARCH_LOONGSON)?
static const size_t kMaxPartitionPagesPerSlotSpan = 4;
// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size. What
// we're actually doing is avoiding filling the full `PartitionPage` (16 KiB)
// with freelist pointers right away. Writing freelist pointers will fault and
// dirty a private page, which is very wasteful if we never actually store
// objects there.
// Number of system pages that make up one `PartitionPage`.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() {
  return PartitionPageSize() / SystemPageSize();
}
// Upper bound, in system pages, on the length of a slot span
// (kMaxPartitionPagesPerSlotSpan partition pages).
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxSystemPagesPerSlotSpan() {
  return NumSystemPagesPerPartitionPage() * kMaxPartitionPagesPerSlotSpan;
}
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
// freeing memory very fast. 2 MiB size & alignment were chosen, because this
// virtual address block represents a full but single page table allocation on
// ARM, ia32 and x64, which may be slightly more performance&memory efficient.
// (Note, these super pages are backed by 4 KiB system pages and have nothing to
// do with OS concept of "huge pages"/"large pages", even though the size
// coincides.)
//
// The layout of the super page is as follows. The sizes below are the same for
// 32- and 64-bit platforms.
//
// +-----------------------+
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | QuarantineBitmaps |
// | Slot span |
// | Slot span |
// | ... |
// | Slot span |
// | Guard pages (16 KiB) |
// +-----------------------+
//
// QuarantineBitmaps are inserted for partitions that may have PCScan enabled.
//
// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
// that slot spans of different sizes may co-exist within one super page. Even
// slot spans of the same size may support different slot sizes. However, all
// slots within a span have to be of the same size.
//
// The metadata page has the following format. Note that the `PartitionPage`
// that is not at the head of a slot span is "unused" (by most part, it only
// stores the offset from the head page). In other words, the metadata for the
// slot span is stored only in the first `PartitionPage` of the slot span.
// Metadata accesses to other `PartitionPage`s are redirected to the first
// `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B) |
// | PartitionPage of slot span 1 (32 B, used) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 2 (32 B, used) |
// | PartitionPage of slot span 3 (32 B, used) |
// | ... |
// | PartitionPage of slot span N (32 B, used) |
// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
// A direct-mapped page has an identical layout at the beginning to fake it
// looking like a super page:
//
// +---------------------------------+
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | Direct mapped object |
// | Guard page (4 KiB, 32-bit only) |
// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout (on 64 bit
// architectures. On 32 bit ones, the layout is identical, some sizes are
// different due to smaller pointers.):
//
// +----------------------------------+
// | SuperPageExtentEntry (32 B) |
// | PartitionPage (32 B) |
// | PartitionBucket (40 B) |
// | PartitionDirectMapExtent (32 B) |
// +----------------------------------+
//
// See |PartitionDirectMapMetadata| for details.
static const size_t kSuperPageShift = 21; // 2 MiB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageAlignment = kSuperPageSize;
static const size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// Number of `PartitionPage`s (including the metadata/guard ones described
// above) that fit in one 2 MiB super page.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() {
  return kSuperPageSize / PartitionPageSize();
}
// Alignment has two constraints:
// - Alignment requirement for scalar types: alignof(std::max_align_t)
// - Alignment requirement for operator new().
//
// The two are separate on Windows 64 bits, where the first one is 8 bytes, and
// the second one 16. We could technically return something different for
// malloc() and operator new(), but this would complicate things, and most of
// our allocations are presumably coming from operator new() anyway.
//
// __STDCPP_DEFAULT_NEW_ALIGNMENT__ is C++17. As such, it is not defined on all
// platforms, as Chrome's requirement is C++14 as of 2020.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
static constexpr size_t kAlignment =
std::max(alignof(std::max_align_t), __STDCPP_DEFAULT_NEW_ALIGNMENT__);
#else
static constexpr size_t kAlignment = alignof(std::max_align_t);
#endif
static_assert(kAlignment <= 16,
"PartitionAlloc doesn't support a fundamental alignment larger "
"than 16 bytes.");
// The "order" of an allocation is closely related to the power-of-2 size of the
// allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbers starts at
// index 1 for the least-significant-bit.
//
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
// PartitionAlloc should return memory properly aligned for any type, to behave
// properly as a generic allocator. This is not strictly required as long as
// types are explicitly allocated with PartitionAlloc, but is to use it as a
// malloc() implementation, and generally to match malloc()'s behavior.
//
// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
// bytes on 64 bit ones.
static const size_t kMinBucketedOrder =
kAlignment == 16 ? 5 : 4; // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
static const size_t kMaxBucketedOrder = 20;
static const size_t kNumBucketedOrders =
(kMaxBucketedOrder - kMinBucketedOrder) + 1;
// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
// 160, ..., 240:
static const size_t kNumBucketsPerOrderBits = 3;
static const size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
static const size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
static const size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
static const size_t kMaxBucketSpacing =
1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
static const size_t kMaxBucketed =
(1 << (kMaxBucketedOrder - 1)) +
((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
// Limit when downsizing a direct mapping using `realloc`:
static const size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2GiB to make sure that a 2GiB allocation
// fails. This is a security choice in Chrome, to help making size_t vs int bugs
// harder to exploit.
//
// There are matching limits in other allocators, such as tcmalloc. See
// crbug.com/998048 for details.
// Largest allocation size served via direct mapping: just under 2 GiB, per
// the security rationale in the comment above.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxDirectMapped() {
  // Subtract kSuperPageSize to accommodate for alignment inside
  // PartitionRoot::GetDirectMapReservedSize.
  return (1UL << 31) - kSuperPageSize;
}
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// Constant for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;
// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably it is a "out of virtual address space" crash), a special
// crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1 GiB
// These byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
static const unsigned char kQuarantinedByte = 0xEF;
// Flags accepted by the allocation entry points; combined as a bitmask in
// the `flags` parameter.
enum PartitionAllocFlags {
  PartitionAllocReturnNull = 1 << 0,
  PartitionAllocZeroFill = 1 << 1,
  PartitionAllocNoHooks = 1 << 2,  // Internal only.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note this makes all large
  // allocations return nullptr, such as direct-mapped ones, and even for
  // smaller ones, a nullptr value is common.
  PartitionAllocFastPathOrReturnNull = 1 << 3,  // Internal only.
  PartitionAllocLastFlag = PartitionAllocFastPathOrReturnNull
};
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

View File

@ -0,0 +1,34 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/feature_list.h"
namespace base {
namespace features {
#if PA_ALLOW_PCSCAN
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};
#endif  // PA_ALLOW_PCSCAN
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
    "PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
// If enabled, the thread cache will be periodically purged.
const Feature kPartitionAllocThreadCachePeriodicPurge{
    "PartitionAllocThreadCachePeriodicPurge", FEATURE_DISABLED_BY_DEFAULT};
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Enabled by default; presumably controls lazy committing of backing pages
// (consumed via PartitionRoot::ConfigureLazyCommit — confirm at call site).
const Feature kPartitionAllocLazyCommit{"PartitionAllocLazyCommit",
                                        FEATURE_ENABLED_BY_DEFAULT};
} // namespace features
} // namespace base

View File

@ -0,0 +1,76 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/partition_alloc_buildflags.h"
#include "build/build_config.h"
#if defined(OS_WIN)
// VersionHelpers.h depends on Windows.h.
#include <Windows.h>
// For IsWindows8Point1OrGreater().
#include <VersionHelpers.h>
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && !BUILDFLAG(USE_BACKUP_REF_PTR)
#define PA_ALLOW_PCSCAN 1
#else
#define PA_ALLOW_PCSCAN 0
#endif
namespace base {
struct Feature;
namespace features {
#if PA_ALLOW_PCSCAN
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif // PA_ALLOW_PCSCAN
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocThreadCachePeriodicPurge;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocLazyCommit;
// Returns whether the GigaCage address-space reservation should be used.
// Only 64-bit Windows can return false (for versions older than 8.1, see
// below); all other configurations return true unconditionally. Safe to
// call on allocation paths: uses only trivially-initialized statics.
ALWAYS_INLINE bool IsPartitionAllocGigaCageEnabled() {
#if defined(PA_HAS_64_BITS_POINTERS) && defined(OS_WIN)
  // Lots of crashes (at PartitionAddressSpace::Init) occur
  // when enabling GigaCage on Windows whose version is smaller than 8.1,
  // because PTEs for reserved memory counts against commit limit. See
  // https://crbug.com/1101421.
  // TODO(tasak): this windows version check is the same as GetRandomPageBase()
  // (address_space_randomization.cc). Refactor the code to avoid the
  // duplication.
  static bool is_windows_version_checked = false;
  // Don't assign directly IsWindows8Point1OrGreater() to a static local
  // variable, because the initial value is not trivial and the assignment needs
  // thread-safe static-local initializer on Windows. (i.e. Init_thread_header)
  // This causes issues when used on the allocation path (see
  // crbug.com/1126432). As we don't use atomics here, this may end up querying
  // the version multiple times, which is fine, as this operation is idempotent,
  // with no side-effects.
  static bool recent_enough_windows_version = false;
  if (!is_windows_version_checked) {
    recent_enough_windows_version = IsWindows8Point1OrGreater();
    is_windows_version_checked = true;
  }
  if (!recent_enough_windows_version)
    return false;
#endif  // defined(PA_HAS_64_BITS_POINTERS) && defined(OS_WIN)
  return true;
}
} // namespace features
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

View File

@ -0,0 +1,36 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/partition_alloc_buildflags.h"
namespace base {
namespace internal {
// Forward declaration; the definition lives elsewhere in the allocator.
template <bool thread_safe>
struct SlotSpanMetadata;
// Template arguments selecting the locking flavor of PartitionRoot /
// PartitionAllocator.
constexpr bool ThreadSafe = true;
constexpr bool NotThreadSafe = false;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
#if DCHECK_IS_ON()
BASE_EXPORT void DCheckGetSlotOffsetIsZero(void*);
#else
// No-op in non-DCHECK builds so call sites need no #ifdefs.
ALWAYS_INLINE void DCheckGetSlotOffsetIsZero(void*) {}
#endif
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
} // namespace internal
template <bool thread_safe>
struct PartitionRoot;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_

View File

@ -0,0 +1,114 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
namespace base {
// Lock serializing the Set*Hooks() writers; readers of the hook pointers
// use lock-free relaxed atomic loads instead. Intentionally leaked
// (NoDestructor) so it stays usable for the whole process lifetime.
Lock& GetHooksLock() {
  static NoDestructor<Lock> hooks_lock;
  return *hooks_lock;
}
// Static hook state. All fields are read on allocator fast paths with
// relaxed atomic loads; writes happen under GetHooksLock() in the setters
// below.
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);
// Installs (or, with nullptrs, removes) the allocation/free observer hooks.
// Writers serialize on GetHooksLock(); readers use relaxed atomic loads.
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  AutoLock guard(GetHooksLock());
  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
           (!alloc_hook && !free_hook))
      << "Overwriting already set observer hooks";
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;
  // NOTE(review): only the *allocation* hooks feed hooks_enabled_ — a
  // free/realloc-only configuration would not flip it. Presumably hook users
  // always install an allocation hook; confirm before relying on this.
  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}
// Installs (or, with nullptrs, removes) the override hooks, which can fully
// replace alloc/free/realloc behavior. Same no-overwrite rule and locking
// discipline as SetObserverHooks().
void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  AutoLock guard(GetHooksLock());
  PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
            !realloc_override_hook_) ||
           (!alloc_hook && !free_hook && !realloc_hook))
      << "Overwriting already set override hooks";
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;
  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}
void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address,
size_t size,
const char* type_name) {
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name);
}
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
void** out,
int flags,
size_t size,
const char* type_name) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name);
return false;
}
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address);
}
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address);
return false;
}
// Reports a reallocation to observers as a free of the old address followed
// by an allocation at the new one. Fires only when BOTH observers are
// registered, so observers never see an unpaired free/alloc event.
void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  auto* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  auto* free_hook = free_observer_hook_.load(std::memory_order_relaxed);
  if (!allocation_hook || !free_hook)
    return;
  free_hook(old_address);
  allocation_hook(new_address, size, type_name);
}
// Gives the realloc override hook, if any, a chance to report the size of
// the underlying allocation via *out. Returns true iff it did.
bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  ReallocOverrideHook* hook =
      realloc_override_hook_.load(std::memory_order_relaxed);
  if (!hook)
    return false;
  return hook(out, address);
}
} // namespace base

View File

@ -0,0 +1,86 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
#include <atomic>
#include <cstddef>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/base_export.h"
namespace base {
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
// Observer hooks are notified; override hooks may replace the operation
// entirely. All hook state is static; see the .cc for locking details.
class BASE_EXPORT PartitionAllocHooks {
 public:
  // Log allocation and free events.
  typedef void AllocationObserverHook(void* address,
                                      size_t size,
                                      const char* type_name);
  typedef void FreeObserverHook(void* address);
  // If it returns true, the allocation has been overridden with the pointer in
  // *out.
  typedef bool AllocationOverrideHook(void** out,
                                      int flags,
                                      size_t size,
                                      const char* type_name);
  // If it returns true, then the allocation was overridden and has been freed.
  typedef bool FreeOverrideHook(void* address);
  // If it returns true, the underlying allocation is overridden and *out holds
  // the size of the underlying allocation.
  typedef bool ReallocOverrideHook(size_t* out, void* address);
  // To unhook, call Set*Hooks with nullptrs.
  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
                               FreeObserverHook* free_hook);
  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                               FreeOverrideHook* free_hook,
                               ReallocOverrideHook realloc_hook);
  // Helper method to check whether hooks are enabled. This is an optimization
  // so that if a function needs to call observer and override hooks in two
  // different places this value can be cached and only loaded once.
  static bool AreHooksEnabled() {
    return hooks_enabled_.load(std::memory_order_relaxed);
  }
  static void AllocationObserverHookIfEnabled(void* address,
                                              size_t size,
                                              const char* type_name);
  static bool AllocationOverrideHookIfEnabled(void** out,
                                              int flags,
                                              size_t size,
                                              const char* type_name);
  static void FreeObserverHookIfEnabled(void* address);
  static bool FreeOverrideHookIfEnabled(void* address);
  static void ReallocObserverHookIfEnabled(void* old_address,
                                           void* new_address,
                                           size_t size,
                                           const char* type_name);
  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
 private:
  // Single bool that is used to indicate whether observer or allocation hooks
  // are set to reduce the numbers of loads required to check whether hooking is
  // enabled.
  static std::atomic<bool> hooks_enabled_;
  // Hook pointers. Set*Hooks() writers serialize on a lock in the .cc file;
  // readers on allocation fast paths use relaxed atomic loads.
  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
  static std::atomic<FreeObserverHook*> free_observer_hook_;
  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
  static std::atomic<FreeOverrideHook*> free_override_hook_;
  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_

View File

@ -0,0 +1,684 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/object_bitmap.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/bits.h"
#include "base/check.h"
#include "build/build_config.h"
namespace base {
namespace internal {
namespace {
// Creates a direct-mapped allocation: instead of being carved out of a shared
// super page, |raw_size| gets its own reservation with its own metadata and
// guard pages, plus a dedicated single-slot bucket. Returns the slot span
// metadata on success; returns nullptr if either the address-space reservation
// or the commit of the slot's pages fails.
// Must be called with |root->lock_| held (see the thread-safety annotation).
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
    EXCLUSIVE_LOCKS_REQUIRED(root->lock_) {
  size_t slot_size = PartitionRoot<thread_safe>::GetDirectMapSlotSize(raw_size);
  size_t reserved_size = root->GetDirectMapReservedSize(raw_size);
  // |map_size| is the portion of the reservation usable for the mapping
  // itself, i.e. the reservation minus the metadata and guard pages.
  size_t map_size =
      reserved_size -
      PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
  PA_DCHECK(slot_size <= map_size);
  char* ptr = nullptr;
  // Allocate from GigaCage, if enabled.
  bool with_giga_cage = features::IsPartitionAllocGigaCageEnabled();
  if (with_giga_cage) {
    ptr = internal::AddressPoolManager::GetInstance()->Reserve(
        GetDirectMapPool(), nullptr, reserved_size);
  } else {
    ptr = reinterpret_cast<char*>(
        AllocPages(nullptr, reserved_size, kSuperPageAlignment,
                   PageInaccessible, PageTag::kPartitionAlloc));
  }
  if (UNLIKELY(!ptr))
    return nullptr;
  root->total_size_of_direct_mapped_pages.fetch_add(reserved_size,
                                                    std::memory_order_relaxed);
  // The slot starts after the first partition page (guard + metadata area).
  char* slot = ptr + PartitionPageSize();
  // Commit the single system page that holds this mapping's metadata.
  RecommitSystemPages(ptr + SystemPageSize(), SystemPageSize(), PageReadWrite,
                      PageUpdatePermissions);
  // It is typically possible to map a large range of inaccessible pages, and
  // this is leveraged in multiple places, including the GigaCage. However, this
  // doesn't mean that we can commit all this memory. For the vast majority of
  // allocations, this just means that we crash in a slightly different place,
  // but for callers ready to handle failures, we have to return nullptr.
  // See crbug.com/1187404.
  //
  // Note that we didn't check above, because if we cannot even commit a single
  // page, then this is likely hopeless anyway, and we will crash very soon.
  bool ok = root->TryRecommitSystemPagesForData(slot, slot_size,
                                                PageUpdatePermissions);
  if (!ok) {
    // Commit failed; release the reservation the same way it was made.
    if (with_giga_cage) {
      internal::AddressPoolManager::GetInstance()->UnreserveAndDecommit(
          GetDirectMapPool(), ptr, reserved_size);
    } else {
      FreePages(ptr, reserved_size);
    }
    return nullptr;
  }
  auto* metadata = reinterpret_cast<PartitionDirectMapMetadata<thread_safe>*>(
      PartitionSuperPageToMetadataArea(ptr));
  metadata->extent.root = root;
  // The new structures are all located inside a fresh system page so they
  // will all be zeroed out. These DCHECKs are for documentation.
  PA_DCHECK(!metadata->extent.super_page_base);
  PA_DCHECK(!metadata->extent.super_pages_end);
  PA_DCHECK(!metadata->extent.next);
  // Call FromSlotInnerPtr instead of FromSlotStartPtr, because the bucket isn't
  // set up yet to properly assert the slot start.
  PA_DCHECK(PartitionPage<thread_safe>::FromSlotInnerPtr(slot) ==
            &metadata->page);
  auto* page = &metadata->page;
  PA_DCHECK(!page->slot_span_metadata_offset);
  PA_DCHECK(!page->slot_span_metadata.next_slot_span);
  PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
  PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
  PA_DCHECK(!page->slot_span_metadata.empty_cache_index);
  PA_DCHECK(!metadata->bucket.active_slot_spans_head);
  PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
  PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
  PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
  PA_DCHECK(!metadata->bucket.num_full_slot_spans);
  // The direct map gets a dedicated bucket whose only slot is this mapping.
  metadata->bucket.slot_size = slot_size;
  new (&page->slot_span_metadata)
      SlotSpanMetadata<thread_safe>(&metadata->bucket);
  // The single slot forms the entire freelist.
  auto* next_entry = new (slot) PartitionFreelistEntry();
  page->slot_span_metadata.SetFreelistHead(next_entry);
  auto* map_extent = &metadata->direct_map_extent;
  map_extent->map_size = map_size;
  map_extent->bucket = &metadata->bucket;
  // Maintain the doubly-linked list of all direct mappings.
  map_extent->next_extent = root->direct_map_list;
  if (map_extent->next_extent)
    map_extent->next_extent->prev_extent = map_extent;
  map_extent->prev_extent = nullptr;
  root->direct_map_list = map_extent;
  return &page->slot_span_metadata;
}
} // namespace
// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of NumSystemPagesPerPartitionPage() (aka 4) anyways.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unsed pages.
// http://crbug.com/776537
// Chooses how many system pages a slot span of this bucket should cover, by
// minimizing the fraction of the span that is wasted after packing in as many
// whole slots as possible. With the current bucket sizes and partition page
// constants, slots always pack perfectly into some page count, so the only
// waste is unfaulted address space.
//
// TODO: very small slot sizes still claim many system pages (e.g. 12 pages
// for slot size 24, where 4 or even 1 would waste very little); investigate
// whether using fewer pages has anti-fragmentation benefits.
template <bool thread_safe>
uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
  // Oversized slots occupy a whole number of system pages by themselves.
  if (slot_size > MaxSystemPagesPerSlotSpan() * SystemPageSize()) {
    // TODO(ajwong): Why is there a DCHECK here for this?
    // http://crbug.com/776537
    PA_DCHECK(!(slot_size % SystemPageSize()));
    uint16_t pages_needed = static_cast<uint16_t>(slot_size / SystemPageSize());
    // TODO(ajwong): Should this be checking against
    // MaxSystemPagesPerSlotSpan() or numeric_limits<uint8_t>::max?
    // http://crbug.com/776537
    PA_CHECK(pages_needed < (1 << 8));
    return static_cast<uint8_t>(pages_needed);
  }
  PA_DCHECK(slot_size <= MaxSystemPagesPerSlotSpan() * SystemPageSize());
  double lowest_waste_ratio = 1.0f;
  uint16_t chosen_pages = 0;
  for (uint16_t candidate_pages = NumSystemPagesPerPartitionPage() - 1;
       candidate_pages <= MaxSystemPagesPerSlotSpan(); ++candidate_pages) {
    size_t span_bytes = SystemPageSize() * candidate_pages;
    // Bytes left over after the last whole slot.
    size_t wasted_bytes = span_bytes % slot_size;
    // Leaving a page unfaulted is not free; the page will occupy an empty page
    // table entry. Make a simple attempt to account for that.
    //
    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
    // regardless of whether or not they are wasted. Should it just
    // be waste += i * sizeof(void*)?
    // http://crbug.com/776537
    size_t remainder_pages =
        candidate_pages & (NumSystemPagesPerPartitionPage() - 1);
    size_t unfaulted_pages =
        remainder_pages ? (NumSystemPagesPerPartitionPage() - remainder_pages)
                        : 0;
    wasted_bytes += sizeof(void*) * unfaulted_pages;
    double waste_ratio =
        static_cast<double>(wasted_bytes) / static_cast<double>(span_bytes);
    if (waste_ratio < lowest_waste_ratio) {
      lowest_waste_ratio = waste_ratio;
      chosen_pages = candidate_pages;
    }
  }
  PA_DCHECK(chosen_pages > 0);
  PA_CHECK(chosen_pages <= MaxSystemPagesPerSlotSpan());
  return static_cast<uint8_t>(chosen_pages);
}
// (Re)initializes this bucket to hand out slots of |new_slot_size| bytes.
template <bool thread_safe>
void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
  // Record the slot size first: get_system_pages_per_slot_span() below
  // reads it.
  slot_size = new_slot_size;
  // Precompute the fixed-point reciprocal that GetSlotNumber() uses to turn a
  // division by |slot_size| into a multiply-and-shift.
  slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
  // A fresh bucket owns no slot spans at all.
  empty_slot_spans_head = nullptr;
  decommitted_slot_spans_head = nullptr;
  num_full_slot_spans = 0;
  active_slot_spans_head =
      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}
// Called when num_full_slot_spans (a 24-bit field) would overflow; treats the
// condition as out-of-memory and crashes.
template <bool thread_safe>
NOINLINE void PartitionBucket<thread_safe>::OnFull() {
  OOM_CRASH(0);
}
// Reserves |num_partition_pages| partition pages from the current super page
// for a new slot span, allocating a fresh super page first if the current one
// cannot fit the request. Unless lazy commit is in use, commits the first
// |slot_span_committed_size| bytes. Returns the start of the reserved span, or
// nullptr if a new super page was needed but could not be obtained.
// Note: |flags| is currently unused in this function.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
    PartitionRoot<thread_safe>* root,
    int flags,
    uint16_t num_partition_pages,
    size_t slot_span_committed_size) {
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
              PartitionPageSize()));
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
              PartitionPageSize()));
  PA_DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
  PA_DCHECK(slot_span_committed_size % SystemPageSize() == 0);
  size_t slot_span_reserved_size = PartitionPageSize() * num_partition_pages;
  PA_DCHECK(slot_span_committed_size <= slot_span_reserved_size);
  size_t num_partition_pages_left =
      (root->next_partition_page_end - root->next_partition_page) >>
      PartitionPageShift();
  if (UNLIKELY(num_partition_pages_left < num_partition_pages)) {
    // In this case, we can no longer hand out pages from the current super page
    // allocation. Get a new super page.
    if (!AllocNewSuperPage(root)) {
      return nullptr;
    }
  }
  // AllocNewSuperPage() (if called) has updated |next_partition_page|, so this
  // always points into a super page with enough room.
  char* ret = root->next_partition_page;
  root->next_partition_page += slot_span_reserved_size;
  // System pages in the super page come in a decommited state. Commit them
  // before vending them back.
  // If lazy commit is enabled, pages will be committed when provisioning slots,
  // in ProvisionMoreSlotsAndAllocOne(), not here.
  if (!root->use_lazy_commit) {
    root->RecommitSystemPagesForData(ret, slot_span_committed_size,
                                     PageUpdatePermissions);
  }
  // Double check that we had enough space in the super page for the new slot
  // span.
  PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
  return ret;
}
// Reserves a new super page (from GigaCage when enabled, otherwise directly
// from the page allocator), commits its metadata page and — when quarantine is
// enabled — its quarantine bitmaps, and links it into the root's extent
// bookkeeping. Returns the start of the super page's payload area, or nullptr
// if the reservation failed.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSuperPage(
    PartitionRoot<thread_safe>* root) {
  // Need a new super page. We want to allocate super pages in a contiguous
  // address region as much as possible. This is important for not causing
  // page table bloat and not fragmenting address spaces in 32 bit
  // architectures.
  char* requested_address = root->next_super_page;
  char* super_page = nullptr;
  // Allocate from GigaCage, if enabled. However, the exception to this is when
  // ref-count isn't allowed, as CheckedPtr assumes that everything inside
  // GigaCage uses ref-count (specifically, inside the GigaCage's normal bucket
  // pool).
  if (root->UsesGigaCage()) {
    super_page = AddressPoolManager::GetInstance()->Reserve(
        GetNormalBucketPool(), requested_address, kSuperPageSize);
  } else {
    super_page = reinterpret_cast<char*>(
        AllocPages(requested_address, kSuperPageSize, kSuperPageAlignment,
                   PageInaccessible, PageTag::kPartitionAlloc));
  }
  if (UNLIKELY(!super_page))
    return nullptr;
  root->total_size_of_super_pages.fetch_add(kSuperPageSize,
                                            std::memory_order_relaxed);
  root->next_super_page = super_page + kSuperPageSize;
  // The quarantine bitmaps (if any) live right after the first (guard +
  // metadata) partition page; the payload starts after them.
  char* quarantine_bitmaps = super_page + PartitionPageSize();
  const size_t quarantine_bitmaps_reserved_size =
      root->IsQuarantineAllowed() ? ReservedQuarantineBitmapsSize() : 0;
  const size_t quarantine_bitmaps_size_to_commit =
      root->IsQuarantineAllowed() ? CommittedQuarantineBitmapsSize() : 0;
  PA_DCHECK(quarantine_bitmaps_reserved_size % PartitionPageSize() == 0);
  PA_DCHECK(quarantine_bitmaps_size_to_commit % SystemPageSize() == 0);
  PA_DCHECK(quarantine_bitmaps_size_to_commit <=
            quarantine_bitmaps_reserved_size);
  char* ret = quarantine_bitmaps + quarantine_bitmaps_reserved_size;
  root->next_partition_page = ret;
  root->next_partition_page_end = root->next_super_page - PartitionPageSize();
  PA_DCHECK(ret ==
            SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
  PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
  // Keep the first partition page in the super page inaccessible to serve as a
  // guard page, except an "island" in the middle where we put page metadata and
  // also a tiny amount of extent metadata.
  RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(),
                      PageReadWrite, PageUpdatePermissions);
  // If PCScan is used, commit the quarantine bitmap. Otherwise, leave it
  // uncommitted and let PartitionRoot::EnablePCScan commit it when needed.
  if (root->IsQuarantineEnabled()) {
    RecommitSystemPages(quarantine_bitmaps, quarantine_bitmaps_size_to_commit,
                        PageReadWrite, PageUpdatePermissions);
  }
  // If we were after a specific address, but didn't get it, assume that
  // the system chose a lousy address. Here most OS'es have a default
  // algorithm that isn't randomized. For example, most Linux
  // distributions will allocate the mapping directly before the last
  // successful mapping, which is far from random. So we just get fresh
  // randomness for the next mapping attempt.
  if (requested_address && requested_address != super_page)
    root->next_super_page = nullptr;
  // We allocated a new super page so update super page metadata.
  // First check if this is a new extent or not.
  auto* latest_extent =
      reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
          PartitionSuperPageToMetadataArea(super_page));
  // By storing the root in every extent metadata object, we have a fast way
  // to go from a pointer within the partition to the root object.
  latest_extent->root = root;
  // Most new extents will be part of a larger extent, and these three fields
  // are unused, but we initialize them to 0 so that we get a clear signal
  // in case they are accidentally used.
  latest_extent->super_page_base = nullptr;
  latest_extent->super_pages_end = nullptr;
  latest_extent->next = nullptr;
  PartitionSuperPageExtentEntry<thread_safe>* current_extent =
      root->current_extent;
  // A super page placed right where we asked extends the current contiguous
  // extent; anywhere else starts a new one.
  const bool is_new_extent = super_page != requested_address;
  if (UNLIKELY(is_new_extent)) {
    if (UNLIKELY(!current_extent)) {
      PA_DCHECK(!root->first_extent);
      root->first_extent = latest_extent;
    } else {
      PA_DCHECK(current_extent->super_page_base);
      current_extent->next = latest_extent;
    }
    root->current_extent = latest_extent;
    latest_extent->super_page_base = super_page;
    latest_extent->super_pages_end = super_page + kSuperPageSize;
  } else {
    // We allocated next to an existing extent so just nudge the size up a
    // little.
    PA_DCHECK(current_extent->super_pages_end);
    current_extent->super_pages_end += kSuperPageSize;
    PA_DCHECK(ret >= current_extent->super_page_base &&
              ret < current_extent->super_pages_end);
  }
  return ret;
}
// Sets up the metadata of a freshly reserved slot span: constructs the span's
// SlotSpanMetadata (owned by this bucket) in the first PartitionPage, and
// makes every subsequent PartitionPage of the span record its offset back to
// that first page.
template <bool thread_safe>
ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  new (slot_span) SlotSpanMetadata<thread_safe>(this);
  slot_span->empty_cache_index = -1;
  slot_span->Reset();
  uint16_t page_count = get_pages_per_slot_span();
  auto* first_page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
  // Pages after the first only store their distance to the metadata holder.
  for (uint16_t offset = 1; offset < page_count; ++offset)
    first_page[offset].slot_span_metadata_offset = offset;
}
// Provisions the next slot of |slot_span| — committing its page(s) first when
// lazy commit is enabled — and returns it. Any further slots that happen to
// fit entirely within the committed range are pushed onto the span's
// freelist. The freelist must be empty on entry (every slot is either
// allocated or unprovisioned).
template <bool thread_safe>
ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
    PartitionRoot<thread_safe>* root,
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span !=
            SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
  uint16_t num_slots = slot_span->num_unprovisioned_slots;
  PA_DCHECK(num_slots);
  // We should only get here when _every_ slot is either used or unprovisioned.
  // (The third state is "on the freelist". If we have a non-empty freelist, we
  // should not get here.)
  PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
  // Similarly, make explicitly sure that the freelist is empty.
  PA_DCHECK(!slot_span->freelist_head);
  PA_DCHECK(slot_span->num_allocated_slots >= 0);
  size_t size = slot_size;
  char* base = reinterpret_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  // If we got here, the first unallocated slot is either partially or fully on
  // an uncommitted page. If the latter, it must be at the start of that page.
  char* return_slot = base + (size * slot_span->num_allocated_slots);
  char* next_slot = return_slot + size;
  char* commit_start = bits::AlignUp(return_slot, SystemPageSize());
  PA_DCHECK(next_slot > commit_start);
  char* commit_end = bits::AlignUp(next_slot, SystemPageSize());
  // If the slot was partially committed, |return_slot| and |next_slot| fall
  // in different pages. If the slot was fully uncommitted, |return_slot| points
  // to the page start and |next_slot| doesn't, thus only the latter gets
  // rounded up.
  PA_DCHECK(commit_end > commit_start);
  // If lazy commit is enabled, meaning system pages in the slot span come
  // in an initially decommitted state, commit them here.
  // Note, we can't use PageKeepPermissionsIfPossible, because we have no
  // knowledge which pages have been committed before (it doesn't matter on
  // Windows anyway).
  if (root->use_lazy_commit) {
    // TODO(lizeb): Handle commit failure.
    root->RecommitSystemPagesForData(commit_start, commit_end - commit_start,
                                     PageUpdatePermissions);
  }
  // The slot being returned is considered allocated, and no longer
  // unprovisioned.
  slot_span->num_allocated_slots++;
  slot_span->num_unprovisioned_slots--;
  // Add all slots that fit within so far committed pages to the free list.
  PartitionFreelistEntry* prev_entry = nullptr;
  char* next_slot_end = next_slot + size;
  while (next_slot_end <= commit_end) {
    auto* entry = new (next_slot) PartitionFreelistEntry();
    if (!slot_span->freelist_head) {
      PA_DCHECK(!prev_entry);
      slot_span->SetFreelistHead(entry);
    } else {
      prev_entry->SetNext(entry);
    }
    next_slot = next_slot_end;
    next_slot_end = next_slot + size;
    prev_entry = entry;
    slot_span->num_unprovisioned_slots--;
  }
#if DCHECK_IS_ON()
  // The freelist can legitimately still be empty here: when the slot being
  // returned is the only one reaching into the newly committed range (e.g. a
  // single-slot span whose slot size is a multiple of the system page size),
  // the loop above appended no entries. Guard against calling through a null
  // head in that case.
  if (slot_span->freelist_head)
    slot_span->freelist_head->CheckFreeList();
#endif
  return return_slot;
}
// Walks the active list looking for a usable (active) slot span; tidies each
// span it passes over into the empty or decommitted list, or unlinks it if
// full. On success |active_slot_spans_head| points at the usable span and true
// is returned; otherwise the head is set to the sentinel span and false is
// returned. Note the sweep's side effects happen even on failure.
template <bool thread_safe>
bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
  SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
  if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
    return false;
  SlotSpanMetadata<thread_safe>* next_slot_span;
  for (; slot_span; slot_span = next_slot_span) {
    next_slot_span = slot_span->next_slot_span;
    PA_DCHECK(slot_span->bucket == this);
    PA_DCHECK(slot_span != empty_slot_spans_head);
    PA_DCHECK(slot_span != decommitted_slot_spans_head);
    if (LIKELY(slot_span->is_active())) {
      // This slot span is usable because it has freelist entries, or has
      // unprovisioned slots we can create freelist entries from.
      active_slot_spans_head = slot_span;
      return true;
    }
    // Deal with empty and decommitted slot spans.
    if (LIKELY(slot_span->is_empty())) {
      slot_span->next_slot_span = empty_slot_spans_head;
      empty_slot_spans_head = slot_span;
    } else if (LIKELY(slot_span->is_decommitted())) {
      slot_span->next_slot_span = decommitted_slot_spans_head;
      decommitted_slot_spans_head = slot_span;
    } else {
      PA_DCHECK(slot_span->is_full());
      // If we get here, we found a full slot span. Skip over it too, and also
      // mark it as full (via a negative value). We need it marked so that
      // free'ing can tell, and move it back into the active list.
      slot_span->num_allocated_slots = -slot_span->num_allocated_slots;
      ++num_full_slot_spans;
      // num_full_slot_spans is a uint16_t for efficient packing so guard
      // against overflow to be safe.
      if (UNLIKELY(!num_full_slot_spans))
        OnFull();
      // Not necessary but might help stop accidents.
      slot_span->next_slot_span = nullptr;
    }
  }
  // No usable span found: park the head on the sentinel.
  active_slot_spans_head =
      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  return false;
}
// Allocation slow path, reached when the active span's freelist is empty.
// Tries, in order: (1) direct map for oversized requests (when |this| is the
// sentinel bucket), (2) finding another usable span in the active list,
// (3) reviving an empty or decommitted span, (4) carving a brand new span.
// Honors PartitionAllocReturnNull / PartitionAllocFastPathOrReturnNull flags;
// otherwise crashes on out-of-memory.
template <bool thread_safe>
void* PartitionBucket<thread_safe>::SlowPathAlloc(
    PartitionRoot<thread_safe>* root,
    int flags,
    size_t raw_size,
    bool* is_already_zeroed) {
  // The slow path is called when the freelist is empty.
  PA_DCHECK(!active_slot_spans_head->freelist_head);
  SlotSpanMetadata<thread_safe>* new_slot_span = nullptr;
  // |new_slot_span->bucket| will always be |this|, except when |this| is the
  // sentinel bucket, which is used to signal a direct mapped allocation. In
  // this case |new_bucket| will be set properly later. This avoids a read for
  // most allocations.
  PartitionBucket* new_bucket = this;
  *is_already_zeroed = false;
  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
  // marked as special cases. We bounce them through to the slow path so that
  // we can still have a blazing fast hot path due to lack of corner-case
  // branches.
  //
  // Note: The ordering of the conditionals matter! In particular,
  // SetNewActiveSlotSpan() has a side-effect even when returning
  // false where it sweeps the active list and may move things into the empty or
  // decommitted lists which affects the subsequent conditional.
  bool return_null = flags & PartitionAllocReturnNull;
  if (UNLIKELY(is_direct_mapped())) {
    // Case 1: oversized request, served by a direct mapping.
    PA_DCHECK(raw_size > kMaxBucketed);
    PA_DCHECK(this == &root->sentinel_bucket);
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
    // No fast path for direct-mapped allocations.
    if (flags & PartitionAllocFastPathOrReturnNull)
      return nullptr;
    if (raw_size > MaxDirectMapped()) {
      if (return_null)
        return nullptr;
      // The lock is here to protect PA from:
      // 1. Concurrent calls
      // 2. Reentrant calls
      //
      // This is fine here however, as:
      // 1. Concurrency: |PartitionRoot::OutOfMemory()| never returns, so the
      //    lock will not be re-acquired, which would lead to acting on
      //    inconsistent data that could have been modified in-between releasing
      //    and acquiring it.
      // 2. Reentrancy: This is why we release the lock. On some platforms,
      //    terminating the process may free() memory, or even possibly try to
      //    allocate some. Calling free() is fine, but will deadlock since
      //    |PartitionRoot::lock_| is not recursive.
      //
      // Supporting reentrant calls properly is hard, and not a requirement for
      // PA. However up to that point, we've only *read* data, not *written* to
      // any state. Reentrant calls are then fine, especially as we don't
      // continue on this path. The only downside is possibly endless recursion
      // if the OOM handler allocates and fails to use UncheckedMalloc() or
      // equivalent, but that's violating the contract of
      // base::TerminateBecauseOutOfMemory().
      ScopedUnlockGuard<thread_safe> unlock{root->lock_};
      PartitionExcessiveAllocationSize(raw_size);
      IMMEDIATE_CRASH();  // Not required, kept as documentation.
    }
    new_slot_span = PartitionDirectMap(root, flags, raw_size);
    if (new_slot_span)
      new_bucket = new_slot_span->bucket;
    // Memory from PageAllocator is always zeroed.
    *is_already_zeroed = true;
  } else if (LIKELY(SetNewActiveSlotSpan())) {
    // Case 2: did we find an active slot span in the active list?
    new_slot_span = active_slot_spans_head;
    PA_DCHECK(new_slot_span->is_active());
  } else if (LIKELY(empty_slot_spans_head != nullptr) ||
             LIKELY(decommitted_slot_spans_head != nullptr)) {
    // Case 3: look in our lists of empty and decommitted slot spans.
    // Check empty slot spans first, which are preferred, but beware that an
    // empty slot span might have been decommitted.
    while (LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
      empty_slot_spans_head = new_slot_span->next_slot_span;
      // Accept the empty slot span unless it got decommitted.
      if (new_slot_span->freelist_head) {
        new_slot_span->next_slot_span = nullptr;
        break;
      }
      PA_DCHECK(new_slot_span->is_decommitted());
      new_slot_span->next_slot_span = decommitted_slot_spans_head;
      decommitted_slot_spans_head = new_slot_span;
    }
    if (UNLIKELY(!new_slot_span) &&
        LIKELY(decommitted_slot_spans_head != nullptr)) {
      // Commit can be expensive, don't do it.
      if (flags & PartitionAllocFastPathOrReturnNull)
        return nullptr;
      new_slot_span = decommitted_slot_spans_head;
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_decommitted());
      decommitted_slot_spans_head = new_slot_span->next_slot_span;
      // If lazy commit is enabled, pages will be recommitted when provisioning
      // slots, in ProvisionMoreSlotsAndAllocOne(), not here.
      if (!root->use_lazy_commit) {
        void* addr =
            SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(new_slot_span);
        // If lazy commit was never used, we have a guarantee that all slot span
        // pages have been previously committed, and then decommitted using
        // PageKeepPermissionsIfPossible, so use the same option as an
        // optimization. Otherwise fall back to PageUpdatePermissions (slower).
        // (Insider knowledge: as of writing this comment, lazy commit is only
        // used on Windows and this flag is ignored there, thus no perf impact.)
        // TODO(lizeb): Handle commit failure.
        root->RecommitSystemPagesForData(
            addr, new_slot_span->bucket->get_bytes_per_span(),
            root->never_used_lazy_commit ? PageKeepPermissionsIfPossible
                                         : PageUpdatePermissions);
      }
      new_slot_span->Reset();
      *is_already_zeroed = kDecommittedPagesAreAlwaysZeroed;
    }
    PA_DCHECK(new_slot_span);
  } else {
    // Getting a new slot span is expensive, don't do it.
    if (flags & PartitionAllocFastPathOrReturnNull)
      return nullptr;
    // Case 4: if we get here, we need a brand new slot span.
    // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
    // as slot_span_committed_size.
    uint16_t num_partition_pages = get_pages_per_slot_span();
    void* raw_memory =
        AllocNewSlotSpan(root, flags, num_partition_pages,
                         /* slot_span_committed_size= */ get_bytes_per_span());
    if (LIKELY(raw_memory != nullptr)) {
      // Call FromSlotInnerPtr instead of FromSlotStartPtr, because the bucket
      // isn't set up yet to properly assert the slot start.
      new_slot_span =
          SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(raw_memory);
      InitializeSlotSpan(new_slot_span);
      // New memory from PageAllocator is always zeroed.
      *is_already_zeroed = true;
    }
  }
  // Bail if we had a memory allocation failure.
  if (UNLIKELY(!new_slot_span)) {
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
    if (return_null)
      return nullptr;
    // See comment above (about releasing |root->lock_| before terminating).
    ScopedUnlockGuard<thread_safe> unlock{root->lock_};
    root->OutOfMemory(raw_size);
    IMMEDIATE_CRASH();  // Not required, kept as documentation.
  }
  PA_DCHECK(new_bucket != &root->sentinel_bucket);
  new_bucket->active_slot_spans_head = new_slot_span;
  if (new_slot_span->CanStoreRawSize())
    new_slot_span->SetRawSize(raw_size);
  // If we found an active slot span with free slots, or an empty slot span, we
  // have a usable freelist head.
  if (LIKELY(new_slot_span->freelist_head != nullptr)) {
    PartitionFreelistEntry* entry = new_slot_span->freelist_head;
    PartitionFreelistEntry* new_head = entry->GetNext();
    new_slot_span->SetFreelistHead(new_head);
    new_slot_span->num_allocated_slots++;
    // We likely set *is_already_zeroed to true above, make sure that the
    // freelist entry doesn't contain data.
    return entry->ClearForAllocation();
  }
  // Otherwise, we need to provision more slots by committing more pages. Build
  // the free list for the newly provisioned slots.
  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
  return ProvisionMoreSlotsAndAllocOne(root, new_slot_span);
}
// Explicitly instantiate both threading variants so their definitions in this
// translation unit are emitted for linking.
template struct PartitionBucket<ThreadSafe>;
template struct PartitionBucket<NotThreadSafe>;
} // namespace internal
} // namespace base

View File

@ -0,0 +1,182 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#include <stddef.h>
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/thread_annotations.h"
namespace base {
namespace internal {
// A bucket hands out fixed-size slots of |slot_size| bytes, grouped into slot
// spans. It tracks its spans in three lists (active / empty / decommitted) and
// falls back to SlowPathAlloc() when the active span's freelist runs dry.
template <bool thread_safe>
struct PartitionBucket {
  // Accessed most in hot path => goes first.
  // Span currently used for allocation, or the sentinel slot span when none
  // is usable (see SetNewActiveSlotSpan()).
  SlotSpanMetadata<thread_safe>* active_slot_spans_head;
  // Spans with no live allocations.
  SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
  // Spans whose pages have been decommitted.
  SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
  // Size in bytes of each slot this bucket hands out.
  uint32_t slot_size;
  // 0 identifies the sentinel (direct-mapped) bucket; see is_direct_mapped().
  uint32_t num_system_pages_per_slot_span : 8;
  // 24-bit counter; overflow is caught in SetNewActiveSlotSpan() via OnFull().
  uint32_t num_full_slot_spans : 24;
  // `slot_size_reciprocal` is used to improve the performance of
  // `GetSlotNumber` (below). It is computed as `(1 / size) * (2 ** M)` where M
  // is chosen to provide the desired accuracy. As a result, we can replace a
  // slow integer division (or modulo) operation with a pair of multiplication
  // and a bit shift, i.e. `value / size` becomes
  // `(value * size_reciprocal) >> M`.
  uint64_t slot_size_reciprocal;
  // This is `M` from the formula above. For accurate results, both `value` and
  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
  // than `2 ** (M / 2)`. On the other hand, the result of the expression
  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
  static constexpr uint64_t kReciprocalShift = 42;
  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
  static_assert(
      kMaxBucketed < (1 << (kReciprocalShift / 2)),
      "GetSlotOffset may produce an incorrect result when kMaxBucketed is too "
      "large.");
  // Public API.
  // (Re)initializes the bucket for |new_slot_size|-byte slots: resets all span
  // lists and derives |num_system_pages_per_slot_span| and
  // |slot_size_reciprocal|.
  void Init(uint32_t new_slot_size);
  // Sets |is_already_zeroed| to true if the allocation was satisfied by
  // requesting (a) new page(s) from the operating system, or false otherwise.
  // This enables an optimization for when callers use |PartitionAllocZeroFill|:
  // there is no need to call memset on fresh pages; the OS has already zeroed
  // them. (See |PartitionRoot::AllocFromBucket|.)
  //
  // Note the matching Free() functions are in SlotSpanMetadata.
  BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
                                           int flags,
                                           size_t raw_size,
                                           bool* is_already_zeroed)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
  ALWAYS_INLINE bool CanStoreRawSize() const {
    // For direct-map as well as single-slot slot spans (recognized by checking
    // against |MaxSystemPagesPerSlotSpan()|), we have some spare metadata
    // space in subsequent PartitionPage to store the raw size. It isn't only
    // metadata space though, slot spans that have more than one slot can't have
    // raw size stored, because we wouldn't know which slot it applies to.
    if (LIKELY(slot_size <= MaxSystemPagesPerSlotSpan() * SystemPageSize()))
      return false;
    PA_DCHECK((slot_size % SystemPageSize()) == 0);
    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
    return true;
  }
  // True for the sentinel bucket used to signal direct-mapped allocations,
  // which has no per-span page count.
  ALWAYS_INLINE bool is_direct_mapped() const {
    return !num_system_pages_per_slot_span;
  }
  ALWAYS_INLINE size_t get_bytes_per_span() const {
    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
    // https://crbug.com/680657
    return num_system_pages_per_slot_span * SystemPageSize();
  }
  ALWAYS_INLINE uint16_t get_slots_per_span() const {
    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
    // https://crbug.com/680657
    return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
  }
  // Returns a natural number of partition pages (calculated by
  // get_system_pages_per_slot_span()) to allocate from the current
  // super page when the bucket runs out of slots.
  ALWAYS_INLINE uint16_t get_pages_per_slot_span() const {
    // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
    return (num_system_pages_per_slot_span +
            (NumSystemPagesPerPartitionPage() - 1)) /
           NumSystemPagesPerPartitionPage();
  }
  // This helper function scans a bucket's active slot span list for a suitable
  // new active slot span. When it finds a suitable new active slot span (one
  // that has free slots and is not empty), it is set as the new active slot
  // span. If there is no suitable new active slot span, the current active slot
  // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
  // slot spans are scanned, they are tidied up according to their state. Empty
  // slot spans are swept on to the empty list, decommitted slot spans on to the
  // decommitted list and full slot spans are unlinked from any list.
  //
  // This is where the guts of the bucket maintenance is done!
  bool SetNewActiveSlotSpan();
  // Returns a slot number starting from the beginning of the slot span.
  ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) {
    // See the static assertion for `kReciprocalShift` above.
    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
    PA_DCHECK(slot_size <= kMaxBucketed);
    // Multiply-and-shift replacement for `offset_in_slot_span / slot_size`;
    // the DCHECK below verifies it against the real division.
    const size_t offset_in_slot =
        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);
    return offset_in_slot;
  }
 private:
  // Crashes; called when num_full_slot_spans overflows.
  static NOINLINE void OnFull();
  // Returns the number of system pages in a slot span.
  //
  // The calculation attempts to find the best number of system pages to
  // allocate for the given slot_size to minimize wasted space. It uses a
  // heuristic that looks at number of bytes wasted after the last slot and
  // attempts to account for the PTE usage of each system page.
  uint8_t get_system_pages_per_slot_span();
  // Allocates a new slot span with size |num_partition_pages| from the
  // current extent. Metadata within this slot span will be uninitialized.
  // Returns nullptr on error.
  ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
                                       int flags,
                                       uint16_t num_partition_pages,
                                       size_t committed_size)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
  // Allocates a new super page from the current extent. All slot-spans will be
  // in the decommitted state. Returns nullptr on error.
  ALWAYS_INLINE void* AllocNewSuperPage(PartitionRoot<thread_safe>* root)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
  // Each bucket allocates a slot span when it runs out of slots.
  // A slot span's size is equal to get_pages_per_slot_span() number of
  // partition pages. This function initializes all PartitionPage within the
  // span to point to the first PartitionPage which holds all the metadata
  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
  // as the owner of the span. It does NOT put the slots into the bucket's
  // freelist.
  ALWAYS_INLINE void InitializeSlotSpan(
      SlotSpanMetadata<thread_safe>* slot_span);
  // Commit 1 or more pages in |slot_span|, enough to get the next slot, which
  // is returned by this function. If more slots fit into the committed pages,
  // they'll be added to the free list of the slot span (note that next pointers
  // are stored inside the slots).
  // The free list must be empty when calling this function.
  //
  // If |slot_span| was freshly allocated, it must have been passed through
  // InitializeSlotSpan() first.
  ALWAYS_INLINE char* ProvisionMoreSlotsAndAllocOne(
      PartitionRoot<thread_safe>* root,
      SlotSpanMetadata<thread_safe>* slot_span)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
};
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

View File

@ -0,0 +1,53 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h"
namespace base {
namespace internal {
// Handles alignment up to XMM instructions on Intel.
static constexpr size_t kCookieSize = 16;
// Cookies are enabled for debug builds.
#if DCHECK_IS_ON()
static constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
constexpr size_t kPartitionCookieSizeAdjustment = 2 * kCookieSize;
constexpr size_t kPartitionCookieOffsetAdjustment = kCookieSize;
// Verifies that the kCookieSize bytes at |ptr| still hold kCookieValue.
// Any mismatch indicates an out-of-bounds write into the cookie region and
// trips a PA_DCHECK.
ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
  const unsigned char* bytes = static_cast<const unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i)
    PA_DCHECK(bytes[i] == kCookieValue[i]);
}
// Stamps kCookieValue into the kCookieSize bytes at |ptr|, to be checked
// later by PartitionCookieCheckValue().
ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
  unsigned char* bytes = static_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i)
    bytes[i] = kCookieValue[i];
}
#else
constexpr size_t kPartitionCookieSizeAdjustment = 0;
constexpr size_t kPartitionCookieOffsetAdjustment = 0;
// No-op when DCHECKs are off: no cookie was written, so nothing to verify.
ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {}
// No-op when DCHECKs are off: cookies are only used in debug builds.
ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {}
#endif // DCHECK_IS_ON()
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_

View File

@ -0,0 +1,57 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/check.h"
namespace base {
namespace internal {
// Tracks one direct-mapped allocation. Direct-map extents form a
// doubly-linked list hanging off the PartitionRoot (see |direct_map_list|
// usage in PartitionDirectUnmap()), so they can be unlinked on free.
// Field order is part of the metadata layout; do not reorder.
template <bool thread_safe>
struct PartitionDirectMapExtent {
  // Neighbors in the root's list of direct mappings.
  PartitionDirectMapExtent<thread_safe>* next_extent;
  PartitionDirectMapExtent<thread_safe>* prev_extent;
  // The bucket owning this direct-mapped allocation.
  PartitionBucket<thread_safe>* bucket;
  size_t map_size;  // Mapped size, not including guard pages and meta-data.
  // Recovers the extent from its slot span. |slot_span| must belong to a
  // direct-mapped bucket (DCHECKed in the definition below).
  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
      SlotSpanMetadata<thread_safe>* slot_span);
};
// Metadata page for direct-mapped allocations.
//
// Layout matters: FromSlotSpan() below recovers this struct by stepping one
// PartitionPage back from |page|, which relies on the union being exactly
// one PartitionPage in size and immediately preceding |page|.
template <bool thread_safe>
struct PartitionDirectMapMetadata {
  union {
    PartitionSuperPageExtentEntry<thread_safe> extent;
    // Never used, but must have the same size as a real PartitionPage.
    PartitionPage<thread_safe> first_invalid_page;
  };
  // The slot span metadata for the single direct-mapped slot span.
  PartitionPage<thread_safe> page;
  PartitionBucket<thread_safe> bucket;
  PartitionDirectMapExtent<thread_safe> direct_map_extent;
};
// Recovers the direct-map extent from its slot span. |slot_span| is always
// the |page| member of PartitionDirectMapMetadata above; the metadata struct
// therefore begins one PartitionPage earlier, at the invalid page.
template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span->bucket->is_direct_mapped());
  auto* metadata = reinterpret_cast<PartitionDirectMapMetadata<thread_safe>*>(
      reinterpret_cast<PartitionPage<thread_safe>*>(slot_span) - 1);
  return &metadata->direct_map_extent;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_

View File

@ -0,0 +1,173 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/immediate_crash.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
// Enable free list hardening.
//
// Disabled on ARM64 Macs, as this crashes very early (crbug.com/1172236).
// TODO(lizeb): Enable in as many configurations as possible.
#if !(defined(OS_MAC) && defined(ARCH_CPU_ARM64))
#define PA_HAS_FREELIST_HARDENING
#endif
namespace base {
namespace internal {
namespace {
#if defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()
// Terminates the process immediately. Called whenever a freelist integrity
// check fails, which typically indicates a use-after-free or an overflow
// into a freelist entry.
// NOTE(review): this lives in an anonymous namespace inside a header, so
// each including TU gets its own copy — presumably intentional to keep the
// crash site local; confirm before changing linkage.
[[noreturn]] NOINLINE void FreelistCorruptionDetected() {
  IMMEDIATE_CRASH();
}
#endif // defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()
} // namespace
struct EncodedPartitionFreelistEntry;
#if defined(PA_HAS_FREELIST_HARDENING)
static_assert((1 << kMinBucketedOrder) >= 2 * sizeof(void*),
"Need enough space for two pointers in freelist entries");
#endif
// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
//
// An entry lives at the start of a free slot; |next_| chains free slots
// together. With PA_HAS_FREELIST_HARDENING, a bitwise-inverted copy of the
// encoded pointer (|inverted_next_|) is stored alongside and validated on
// every traversal (see GetNext()).
class PartitionFreelistEntry {
 public:
  PartitionFreelistEntry() { SetNext(nullptr); }
  ~PartitionFreelistEntry() = delete;
  // Creates a new entry, with |next| following it.
  static ALWAYS_INLINE PartitionFreelistEntry* InitForThreadCache(
      void* slot_start,
      PartitionFreelistEntry* next) {
    auto* entry = reinterpret_cast<PartitionFreelistEntry*>(slot_start);
    // ThreadCache freelists can point to entries across superpage boundaries,
    // no check contrary to |SetNext()|.
    entry->SetNextInternal(next);
    return entry;
  }
  // Placement new only.
  void* operator new(size_t) = delete;
  void operator delete(void* ptr) = delete;
  void* operator new(size_t, void* buffer) { return buffer; }
  // Encodes |ptr| (scrambles it, see Transform()) for storage in |next_|.
  ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
      PartitionFreelistEntry* ptr) {
    return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
  }
  // Decodes and returns the next entry; with hardening enabled, crashes if
  // |inverted_next_| does not match (defined below the class).
  ALWAYS_INLINE PartitionFreelistEntry* GetNext() const;
  // Walks the entire list; each GetNext() call validates one link.
  NOINLINE void CheckFreeList() const {
#if defined(PA_HAS_FREELIST_HARDENING)
    for (auto* entry = this; entry; entry = entry->GetNext()) {
      // |GetNext()| checks freelist integrity.
    }
#endif
  }
  ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
#if DCHECK_IS_ON()
    // Regular freelists always point to an entry within the same super page.
    if (UNLIKELY(ptr &&
                 (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
                     (reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected();
    }
#endif  // DCHECK_IS_ON()
    SetNextInternal(ptr);
  }
  // Zeroes out |this| before returning it.
  ALWAYS_INLINE void* ClearForAllocation() {
    next_ = nullptr;
#if defined(PA_HAS_FREELIST_HARDENING)
    inverted_next_ = 0;
#endif
    return reinterpret_cast<void*>(this);
  }
 private:
  friend struct EncodedPartitionFreelistEntry;
  // Scrambles/unscrambles a pointer. The transform is its own inverse
  // (byteswap or bitwise-not), so the same function both encodes and decodes.
  ALWAYS_INLINE static void* Transform(void* ptr) {
    // We use bswap on little endian as a fast mask for two reasons:
    // 1) If an object is freed and its vtable used where the attacker doesn't
    // get the chance to run allocations between the free and use, the vtable
    // dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    // corrupt a freelist pointer, partial pointer overwrite attacks are
    // thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
    uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
    return reinterpret_cast<void*>(masked);
  }
  // Stores the encoded pointer and, with hardening, its inverted shadow copy.
  ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr) {
    next_ = Encode(ptr);
#if defined(PA_HAS_FREELIST_HARDENING)
    inverted_next_ = ~reinterpret_cast<uintptr_t>(next_);
#endif
  }
  EncodedPartitionFreelistEntry* next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a Use-after-Free, or overflow of the previous
  // allocation in the slot span.
#if defined(PA_HAS_FREELIST_HARDENING)
  uintptr_t inverted_next_;
#endif
};
// Opaque, same-size stand-in for an encoded (scrambled) freelist pointer.
// Using a distinct type prevents accidentally dereferencing an encoded
// pointer as if it were a PartitionFreelistEntry. The static_assert below
// pins the size equivalence.
struct EncodedPartitionFreelistEntry {
  char scrambled[sizeof(PartitionFreelistEntry*)];
#if defined(PA_HAS_FREELIST_HARDENING)
  char copy_of_scrambled[sizeof(PartitionFreelistEntry*)];
#endif
  EncodedPartitionFreelistEntry() = delete;
  ~EncodedPartitionFreelistEntry() = delete;
  // Unscrambles |ptr| back into a usable entry pointer (Transform() is its
  // own inverse).
  ALWAYS_INLINE static PartitionFreelistEntry* Decode(
      EncodedPartitionFreelistEntry* ptr) {
    return reinterpret_cast<PartitionFreelistEntry*>(
        PartitionFreelistEntry::Transform(ptr));
  }
};
static_assert(sizeof(PartitionFreelistEntry) ==
sizeof(EncodedPartitionFreelistEntry),
"Should not have padding");
// Decodes and returns the next freelist entry, validating the hardened
// shadow copy first when hardening is enabled.
ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext() const {
#if defined(PA_HAS_FREELIST_HARDENING)
  // GetNext() can be called on decommitted memory, which reads back as all
  // zeroes. That is not corruption, so only validate the shadow copy when
  // |next_| is non-null.
  const bool consistent =
      !next_ || (~reinterpret_cast<uintptr_t>(next_) == inverted_next_);
  if (UNLIKELY(!consistent))
    FreelistCorruptionDetected();
#endif  // defined(PA_HAS_FREELIST_HARDENING)
  return EncodedPartitionFreelistEntry::Decode(next_);
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

View File

@ -0,0 +1,62 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/yield_processor.h"
#include "base/threading/platform_thread.h"
#if !defined(PA_HAS_SPINNING_MUTEX)
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <sched.h>
#define YIELD_THREAD sched_yield()
#else // Other OS
#warning "Thread yield not supported on this OS."
#define YIELD_THREAD ((void)0)
#endif // defined(OS_POSIX) || defined(OS_FUCHSIA)
namespace base {
namespace internal {
// Slow path of SpinLock::Acquire(): spin (test-and-test-and-set), then yield
// the thread, then sleep, escalating the back-off so that spinners do not
// starve the lock holder. Returns with the lock held.
void SpinLock::AcquireSlow() {
  // The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
  // critical section defaults, and various other recommendations.
  static const int kYieldProcessorTries = 1000;
  // The value of |kYieldThreadTries| is completely made up.
  static const int kYieldThreadTries = 10;
  int yield_thread_count = 0;
  do {
    do {
      for (int count = 0; count < kYieldProcessorTries; ++count) {
        // Let the processor know we're spinning.
        YIELD_PROCESSOR;
        // Cheap relaxed load first; only attempt the (bus-locking) exchange
        // when the lock looks free.
        if (!lock_.load(std::memory_order_relaxed) &&
            LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
          return;
      }
      if (yield_thread_count < kYieldThreadTries) {
        ++yield_thread_count;
        // Give the OS a chance to schedule something on this core.
        YIELD_THREAD;
      } else {
        // At this point, it's likely that the lock is held by a lower priority
        // thread that is unavailable to finish its work because of higher
        // priority threads spinning here. Sleeping should ensure that they make
        // progress.
        PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
      }
    } while (lock_.load(std::memory_order_relaxed));
  } while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire)));
}
} // namespace internal
} // namespace base
#endif // !defined(PA_HAS_SPINNING_MUTEX)

View File

@ -0,0 +1,177 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
#include <atomic>
#include <type_traits>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/spinning_mutex.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
namespace base {
namespace internal {
// Primary template: a no-op lock. Both thread_safe values are explicitly
// specialized below; the <true> specialization holds a real lock, the
// <false> one is padding-only.
template <bool thread_safe>
class LOCKABLE MaybeSpinLock {
 public:
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
  void Unlock() UNLOCK_FUNCTION() {}
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
};
// RAII guard: acquires |lock| on construction and releases it when the
// guard goes out of scope.
template <bool thread_safe>
class SCOPED_LOCKABLE ScopedGuard {
 public:
  explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
      EXCLUSIVE_LOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Lock();
  }
  ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }
 private:
  MaybeSpinLock<thread_safe>& lock_;
};
// Inverse of ScopedGuard: temporarily releases an already-held |lock| for
// the guard's scope and re-acquires it on destruction.
template <bool thread_safe>
class SCOPED_LOCKABLE ScopedUnlockGuard {
 public:
  explicit ScopedUnlockGuard(MaybeSpinLock<thread_safe>& lock)
      UNLOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Unlock();
  }
  ~ScopedUnlockGuard() EXCLUSIVE_LOCK_FUNCTION() { lock_.Lock(); }
 private:
  MaybeSpinLock<thread_safe>& lock_;
};
#if !defined(PA_HAS_SPINNING_MUTEX)
// Spinlock. Do not use, to be removed. crbug.com/1061437.
//
// Test-and-set lock: |lock_| is nonzero while held. The inline fast paths
// use a single exchange/store; contention is handled in AcquireSlow()
// (defined in partition_lock.cc).
class LOCKABLE BASE_EXPORT SpinLock {
 public:
  constexpr SpinLock() = default;
  ~SpinLock() = default;
  ALWAYS_INLINE void Acquire() EXCLUSIVE_LOCK_FUNCTION() {
    // Uncontended path: one acquire-exchange.
    if (LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
      return;
    AcquireSlow();
  }
  ALWAYS_INLINE bool Try() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    // Faster than simple CAS.
    return !lock_.load(std::memory_order_relaxed) &&
           !lock_.exchange(true, std::memory_order_acquire);
  }
  ALWAYS_INLINE void Release() UNLOCK_FUNCTION() {
    // Release ordering publishes all writes made under the lock.
    lock_.store(false, std::memory_order_release);
  }
  // Not supported.
  void AssertAcquired() const {}
 private:
  // This is called if the initial attempt to acquire the lock fails. It's
  // slower, but has a much better scheduling and power consumption behavior.
  void AcquireSlow();
  std::atomic_int lock_{0};
};
#endif // !defined(PA_HAS_SPINNING_MUTEX)
// Thread-safe specialization: a real lock (SpinningMutex when available,
// SpinLock otherwise), plus reentrancy detection in DCHECK builds when
// PartitionAlloc backs malloc().
template <>
class LOCKABLE MaybeSpinLock<true> {
 public:
  constexpr MaybeSpinLock() : lock_() {}
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && DCHECK_IS_ON()
    // When PartitionAlloc is malloc(), it can easily become reentrant. For
    // instance, a DCHECK() triggers in external code (such as
    // base::Lock). DCHECK() error message formatting allocates, which triggers
    // PartitionAlloc, and then we get reentrancy, and in this case infinite
    // recursion.
    //
    // To avoid that, crash quickly when the code becomes reentrant.
    PlatformThreadRef current_thread = PlatformThread::CurrentRef();
    if (!lock_.Try()) {
      // The lock wasn't free when we tried to acquire it. This can be because
      // another thread or *this* thread was holding it.
      //
      // If it's this thread holding it, then it cannot have become free in the
      // meantime, and the current value of |owning_thread_ref_| is valid, as it
      // was set by this thread. Assuming that writes to |owning_thread_ref_|
      // are atomic, then if it's us, we are trying to recursively acquire a
      // non-recursive lock.
      //
      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
      // itself allocate. Meaning that without this code, a reentrancy issue
      // hangs on Linux.
      if (UNLIKELY(TS_UNCHECKED_READ(owning_thread_ref_) == current_thread)) {
        // Trying to acquire lock while it's held by this thread: reentrancy
        // issue.
        IMMEDIATE_CRASH();
      }
      lock_.Acquire();
    }
    owning_thread_ref_ = current_thread;
#else
    lock_.Acquire();
#endif
  }
  void Unlock() UNLOCK_FUNCTION() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && DCHECK_IS_ON()
    // Clear the owner while the lock is still held, before releasing it.
    owning_thread_ref_ = PlatformThreadRef();
#endif
    lock_.Release();
  }
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
    lock_.AssertAcquired();
  }
 private:
#if defined(PA_HAS_SPINNING_MUTEX)
  SpinningMutex lock_;
#else
  // base::Lock is slower on the fast path than SpinLock, hence we still use
  // SpinLock. crbug.com/1125999
  SpinLock lock_;
#endif  // defined(PA_HAS_SPINNING_MUTEX)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && DCHECK_IS_ON()
  PlatformThreadRef owning_thread_ref_ GUARDED_BY(lock_);
#endif
};
// We want PartitionRoot to not have a global destructor, so this should not
// have one.
static_assert(std::is_trivially_destructible<MaybeSpinLock<true>>::value, "");
// Non-thread-safe specialization: all operations are no-ops. |padding_|
// keeps sizeof equal to MaybeSpinLock<true> (enforced by the static_assert
// below) so PartitionRoot has an identical layout either way.
template <>
class LOCKABLE MaybeSpinLock<false> {
 public:
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
  void Unlock() UNLOCK_FUNCTION() {}
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
  char padding_[sizeof(MaybeSpinLock<true>)];
};
static_assert(
sizeof(MaybeSpinLock<true>) == sizeof(MaybeSpinLock<false>),
"Sizes should be equal to ensure identical layout of PartitionRoot");
using PartitionLock = MaybeSpinLock<true>;
using PartitionAutoLock = ScopedGuard<true>;
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_

View File

@ -0,0 +1,38 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h"
namespace base {
namespace internal {
OomFunction g_oom_handling_function = nullptr;
// Crash handler for allocation requests whose size exceeds what
// PartitionAlloc supports. Never returns: OOM_CRASH() terminates the
// process. [[noreturn]] added to match the declaration in partition_oom.h.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED
PartitionExcessiveAllocationSize(size_t size) {
  NO_CODE_FOLDING();
  OOM_CRASH(size);
}
#if !defined(ARCH_CPU_64_BITS)
// 32-bit-only OOM crash handler used when many pages are uncommitted.
// Never returns. [[noreturn]] added for consistency with the declaration in
// partition_oom.h and with PartitionOutOfMemoryWithLargeVirtualSize() below.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
  NO_CODE_FOLDING();
  OOM_CRASH(size);
}
// 32-bit-only OOM crash handler used when the virtual address space usage is
// large. Never returns: OOM_CRASH() terminates the process.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
  NO_CODE_FOLDING();
  OOM_CRASH(virtual_size);
}
#endif // !defined(ARCH_CPU_64_BITS)
} // namespace internal
} // namespace base

View File

@ -0,0 +1,40 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Holds functions for generating OOM errors from PartitionAlloc. This is
// distinct from oom.h in that it is meant only for use in PartitionAlloc.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#include <stddef.h>
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
typedef void (*OomFunction)(size_t);
namespace internal {
// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
extern OomFunction g_oom_handling_function;
[[noreturn]] BASE_EXPORT NOINLINE void PartitionExcessiveAllocationSize(
size_t size);
#if !defined(ARCH_CPU_64_BITS)
[[noreturn]] NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(
size_t size);
[[noreturn]] NOINLINE void PartitionOutOfMemoryWithLargeVirtualSize(
size_t virtual_size);
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_

View File

@ -0,0 +1,230 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/bits.h"
#include "base/check.h"
#include "base/feature_list.h"
#include "base/notreached.h"
#include "build/build_config.h"
namespace base {
namespace internal {
namespace {
// Unlinks a direct-mapped allocation from the root's list of direct maps and
// returns the region to unmap. The caller must hold |root->lock_|; the
// actual unmapping is deferred (via the returned DeferredUnmap) so it can
// run after the lock is released.
// Change: removed a duplicated PA_DCHECK on |reserved_size| alignment —
// the identical check appeared twice with no intervening modification.
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(SlotSpanMetadata<thread_safe>* slot_span) {
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  root->lock_.AssertAcquired();
  auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);
  // Maintain the doubly-linked list of all direct mappings.
  if (extent->prev_extent) {
    PA_DCHECK(extent->prev_extent->next_extent == extent);
    extent->prev_extent->next_extent = extent->next_extent;
  } else {
    root->direct_map_list = extent->next_extent;
  }
  if (extent->next_extent) {
    PA_DCHECK(extent->next_extent->prev_extent == extent);
    extent->next_extent->prev_extent = extent->prev_extent;
  }
  // The actual decommit is deferred, when releasing the reserved memory region.
  root->DecreaseCommittedPages(slot_span->bucket->slot_size);
  size_t reserved_size =
      extent->map_size +
      PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
  PA_DCHECK(!(reserved_size & PageAllocationGranularityOffsetMask()));
  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reserved_size);
  root->total_size_of_direct_mapped_pages -= reserved_size;
  char* ptr = reinterpret_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  // Account for the mapping starting a partition page before the actual
  // allocation address.
  ptr -= PartitionPageSize();
  return {ptr, reserved_size};
}
// Registers a newly-empty |slot_span| in the root's fixed-size ring of empty
// slot spans. The ring delays decommit: the span evicted from the slot we
// overwrite is decommitted now, giving recently-emptied spans a grace period
// in which they can be reused without a recommit. Caller must hold
// |root->lock_|.
template <bool thread_safe>
ALWAYS_INLINE void PartitionRegisterEmptySlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span->is_empty());
  PartitionRoot<thread_safe>* root =
      PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  root->lock_.AssertAcquired();
  // If the slot span is already registered as empty, give it another life.
  if (slot_span->empty_cache_index != -1) {
    PA_DCHECK(slot_span->empty_cache_index >= 0);
    PA_DCHECK(static_cast<unsigned>(slot_span->empty_cache_index) <
              kMaxFreeableSpans);
    PA_DCHECK(root->global_empty_slot_span_ring[slot_span->empty_cache_index] ==
              slot_span);
    root->global_empty_slot_span_ring[slot_span->empty_cache_index] = nullptr;
  }
  int16_t current_index = root->global_empty_slot_span_ring_index;
  SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
      root->global_empty_slot_span_ring[current_index];
  // The slot span might well have been re-activated, filled up, etc. before we
  // get around to looking at it here.
  if (slot_span_to_decommit)
    slot_span_to_decommit->DecommitIfPossible(root);
  // We put the empty slot span on our global list of "slot spans that were once
  // empty". thus providing it a bit of breathing room to get re-used before
  // we really free it. This improves performance, particularly on Mac OS X
  // which has subpar memory management performance.
  root->global_empty_slot_span_ring[current_index] = slot_span;
  slot_span->empty_cache_index = current_index;
  // Advance the ring cursor, wrapping around at kMaxFreeableSpans.
  ++current_index;
  if (current_index == kMaxFreeableSpans)
    current_index = 0;
  root->global_empty_slot_span_ring_index = current_index;
}
} // namespace
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>
SlotSpanMetadata<thread_safe>::sentinel_slot_span_;
// static
// Returns the shared sentinel slot span. Per the PartitionBucket comments,
// it is used as the active slot span when a bucket has no suitable real one,
// so |active_slot_spans_head| never has to be null.
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
  return &sentinel_slot_span_;
}
// Constructs metadata bound to |bucket|; caches whether the bucket allows
// storing the raw (requested) size, since that cannot change afterwards.
template <bool thread_safe>
SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
    PartitionBucket<thread_safe>* bucket)
    : bucket(bucket), can_store_raw_size(bucket->CanStoreRawSize()) {}
// Slow path of Free(), taken when a free changes the span's state:
// - the span became fully empty (num_allocated_slots just hit 0), or
// - the span was full (negative num_allocated_slots encoding) and now has
//   one free slot again.
// Returns a DeferredUnmap the caller must Run() after releasing the lock.
template <bool thread_safe>
DeferredUnmap SlotSpanMetadata<thread_safe>::FreeSlowPath() {
#if DCHECK_IS_ON()
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif
  PA_DCHECK(this != get_sentinel_slot_span());
  if (LIKELY(num_allocated_slots == 0)) {
    // Slot span became fully unused.
    if (UNLIKELY(bucket->is_direct_mapped())) {
      return PartitionDirectUnmap(this);
    }
#if DCHECK_IS_ON()
    freelist_head->CheckFreeList();
#endif
    // If it's the current active slot span, change it. We bounce the slot span
    // to the empty list as a force towards defragmentation.
    if (LIKELY(this == bucket->active_slot_spans_head))
      bucket->SetNewActiveSlotSpan();
    PA_DCHECK(bucket->active_slot_spans_head != this);
    if (CanStoreRawSize())
      SetRawSize(0);
    PartitionRegisterEmptySlotSpan(this);
  } else {
    PA_DCHECK(!bucket->is_direct_mapped());
    // Ensure that the slot span is full. That's the only valid case if we
    // arrive here.
    PA_DCHECK(num_allocated_slots < 0);
    // A transition of num_allocated_slots from 0 to -1 is not legal, and
    // likely indicates a double-free.
    PA_CHECK(num_allocated_slots != -1);
    // A full span with S slots is encoded as -(S+1); after this free the
    // count becomes S-1, as DCHECKed below.
    num_allocated_slots = -num_allocated_slots - 2;
    PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
    // Fully used slot span became partially used. It must be put back on the
    // non-full list. Also make it the current slot span to increase the
    // chances of it being filled up again. The old current slot span will be
    // the next slot span.
    PA_DCHECK(!next_slot_span);
    if (LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
      next_slot_span = bucket->active_slot_spans_head;
    bucket->active_slot_spans_head = this;
    --bucket->num_full_slot_spans;
    // Special case: for a partition slot span with just a single slot, it may
    // now be empty and we want to run it through the empty logic.
    if (UNLIKELY(num_allocated_slots == 0))
      return FreeSlowPath();
  }
  return {};
}
// Releases the backing memory of an empty slot span back to the system and
// marks the span decommitted (null freelist, zero unprovisioned slots).
// Caller must hold |root->lock_| and the span must be empty and not
// direct-mapped.
template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(is_empty());
  PA_DCHECK(!bucket->is_direct_mapped());
  void* slot_span_start = SlotSpanMetadata::ToSlotSpanStartPtr(this);
  // If lazy commit is enabled, only provisioned slots are committed.
  size_t size_to_decommit =
      root->use_lazy_commit
          ? bits::AlignUp(GetProvisionedSize(), SystemPageSize())
          : bucket->get_bytes_per_span();
  // Not decommitted slot span must've had at least 1 allocation.
  PA_DCHECK(size_to_decommit > 0);
  root->DecommitSystemPagesForData(slot_span_start, size_to_decommit,
                                   PageKeepPermissionsIfPossible);
  // We actually leave the decommitted slot span in the active list. We'll sweep
  // it on to the decommitted list when we next walk the active list.
  // Pulling this trick enables us to use a singly-linked list for all
  // cases, which is critical in keeping the slot span metadata structure down
  // to 32 bytes in size.
  SetFreelistHead(nullptr);
  num_unprovisioned_slots = 0;
  PA_DCHECK(is_decommitted());
  PA_DCHECK(bucket);
}
// Evicts this span from the empty-span ring and decommits it if it is still
// empty (it may have been re-activated since being registered). Caller must
// hold |root->lock_|; the span must currently occupy its ring slot.
template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
    PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(empty_cache_index >= 0);
  PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index]);
  // Mark as no longer in the ring before potentially decommitting.
  empty_cache_index = -1;
  if (is_empty())
    Decommit(root);
}
// Performs the deferred unmapping. Without GigaCage the pages are simply
// freed; with GigaCage the reservation is returned to the direct-map pool.
void DeferredUnmap::Unmap() {
  PA_DCHECK(ptr && size > 0);
  if (!features::IsPartitionAllocGigaCageEnabled()) {
    FreePages(ptr, size);
    return;
  }
  // Currently this function is only called for direct-mapped allocations.
  PA_DCHECK(IsManagedByPartitionAllocDirectMap(ptr));
  internal::AddressPoolManager::GetInstance()->UnreserveAndDecommit(
      internal::GetDirectMapPool(), ptr, size);
}
template struct SlotSpanMetadata<ThreadSafe>;
template struct SlotSpanMetadata<NotThreadSafe>;
} // namespace internal
} // namespace base

View File

@ -0,0 +1,654 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#include <string.h>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/object_bitmap.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/check_op.h"
#include "base/thread_annotations.h"
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
#include "base/allocator/partition_allocator/partition_ref_count.h"
#endif
namespace base {
namespace internal {
// An "extent" is a span of consecutive superpages. We link the partition's next
// extent (if there is one) to the very start of a superpage's metadata area.
template <bool thread_safe>
struct PartitionSuperPageExtentEntry {
  // The root owning this extent.
  PartitionRoot<thread_safe>* root;
  // [super_page_base, super_pages_end) bounds of the consecutive superpages.
  char* super_page_base;
  char* super_pages_end;
  // Next extent of the same partition, or null.
  PartitionSuperPageExtentEntry<thread_safe>* next;
};
static_assert(
sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
"PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
// SlotSpanMetadata::Free() defers unmapping a large page until the lock is
// released. Callers of SlotSpanMetadata::Free() must invoke Run().
// TODO(1061437): Reconsider once the new locking mechanism is implemented.
struct DeferredUnmap {
  // Start of the region to unmap; nullptr means nothing to do.
  void* ptr = nullptr;
  // Size in bytes of the region to unmap.
  size_t size = 0;
  // In most cases there is no page to unmap and ptr == nullptr. This function
  // is inlined to avoid the overhead of a function call in the common case.
  ALWAYS_INLINE void Run();
 private:
  // Out-of-line slow path doing the actual unmap (see partition_page.cc).
  BASE_EXPORT NOINLINE void Unmap();
};
using QuarantineBitmap =
ObjectBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;
// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active slot span has available free slots. A full slot span has no free
// slots. An empty slot span has no free slots, and a decommitted slot span is
// an empty one that had its backing memory released back to the system.
//
// There are two linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list is an accurate list of slot
// spans which are either empty or decommitted.
//
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
// return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
// the empty list or it _may_ leave it on the active list until a future
// list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full, empty and decommitted slot spans encountered will be
// booted out of the active list. If there are no suitable active slot spans
// found, an empty or decommitted slot spans (if one exists) will be pulled
// from the empty list on to the active list.
template <bool thread_safe>
struct __attribute__((packed)) SlotSpanMetadata {
  PartitionFreelistEntry* freelist_head = nullptr;
  SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
  PartitionBucket<thread_safe>* const bucket;
  // Deliberately signed, 0 for empty or decommitted slot spans, -n for full
  // slot spans:
  int16_t num_allocated_slots = 0;
  uint16_t num_unprovisioned_slots = 0;
  int8_t empty_cache_index = 0;  // -1 if not in the empty cache.
  // < kMaxFreeableSpans.
  static_assert(kMaxFreeableSpans < std::numeric_limits<int8_t>::max(), "");
  const bool can_store_raw_size;
  explicit SlotSpanMetadata(PartitionBucket<thread_safe>* bucket);
  // Public API
  // Note the matching Alloc() functions are in PartitionPage.
  // Callers must invoke DeferredUnmap::Run() after releasing the lock.
  BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
  ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
  void Decommit(PartitionRoot<thread_safe>* root);
  void DecommitIfPossible(PartitionRoot<thread_safe>* root);
  // Pointer manipulation functions. These must be static as the input
  // |slot_span| pointer may be the result of an offset calculation and
  // therefore cannot be trusted. The objective of these functions is to
  // sanitize this input.
  ALWAYS_INLINE static void* ToSlotSpanStartPtr(
      const SlotSpanMetadata* slot_span);
  ALWAYS_INLINE static SlotSpanMetadata* FromSlotStartPtr(void* slot_start);
  ALWAYS_INLINE static SlotSpanMetadata* FromSlotInnerPtr(void* ptr);
  // Checks if it is feasible to store raw_size.
  ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size; }
  // The caller is responsible for ensuring that raw_size can be stored before
  // calling Set/GetRawSize.
  ALWAYS_INLINE void SetRawSize(size_t raw_size);
  ALWAYS_INLINE size_t GetRawSize() const;
  ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);
  // Returns size of the region used within a slot. The used region comprises
  // of actual allocated data, extras and possibly empty space in the middle.
  ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
    // The returned size can be:
    // - The slot size for small buckets.
    // - Exact size needed to satisfy allocation (incl. extras), for large
    //   buckets and direct-mapped allocations (see also the comment in
    //   CanStoreRawSize() for more info).
    if (LIKELY(!CanStoreRawSize())) {
      return bucket->slot_size;
    }
    return GetRawSize();
  }
  // Returns the size available to the app. It can be equal or higher than the
  // requested size. If higher, the overage won't exceed what's actually usable
  // by the app without a risk of running out of an allocated region or into
  // PartitionAlloc's internal data (like extras).
  ALWAYS_INLINE size_t GetUsableSize(PartitionRoot<thread_safe>* root) const {
    // The returned size can be:
    // - The slot size minus extras, for small buckets. This could be more than
    //   requested size.
    // - Raw size minus extras, for large buckets and direct-mapped allocations
    //   (see also the comment in CanStoreRawSize() for more info). This is
    //   equal to requested size.
    size_t size_to_adjust;
    if (LIKELY(!CanStoreRawSize())) {
      size_to_adjust = bucket->slot_size;
    } else {
      size_to_adjust = GetRawSize();
    }
    return root->AdjustSizeForExtrasSubtract(size_to_adjust);
  }
  // Returns the total size of the slots that are currently provisioned.
  ALWAYS_INLINE size_t GetProvisionedSize() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
    return provisioned_size;
  }
  ALWAYS_INLINE void Reset();
  // TODO(ajwong): Can this be made private? https://crbug.com/787153
  BASE_EXPORT static SlotSpanMetadata* get_sentinel_slot_span();
  // Page State accessors.
  // Note that it's only valid to call these functions on pages found on one of
  // the page lists. Specifically, you can't call these functions on full pages
  // that were detached from the active list.
  //
  // This restriction provides the flexibility for some of the status fields to
  // be repurposed when a page is taken off a list. See the negation of
  // |num_allocated_slots| when a full page is removed from the active list
  // for an example of such repurposing.
  ALWAYS_INLINE bool is_active() const;
  ALWAYS_INLINE bool is_full() const;
  ALWAYS_INLINE bool is_empty() const;
  ALWAYS_INLINE bool is_decommitted() const;

 private:
  // sentinel_slot_span_ is used as a sentinel to indicate that there is no slot
  // span in the active list. We could use nullptr, but in that case we need to
  // add a null-check branch to the hot allocation path. We want to avoid that.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
  static SlotSpanMetadata sentinel_slot_span_;
  // For the sentinel.
  constexpr SlotSpanMetadata() noexcept
      : bucket(nullptr), can_store_raw_size(false) {}
};
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
              "SlotSpanMetadata must fit into a Page Metadata slot.");
// Metadata of a non-first partition page in a slot span.
struct SubsequentPageMetadata {
  // Raw size is the size needed to satisfy the allocation (requested size +
  // extras). If available, it can be used to report better statistics or to
  // bring protective cookie closer to the allocated memory.
  //
  // It can be used only if:
  // - there is no more than one slot in the slot span (otherwise we wouldn't
  //   know which slot the raw size applies to)
  // - there is more than one partition page in the slot span (the metadata of
  //   the first one is used to store slot information, but the second one is
  //   available for extra information)
  size_t raw_size;
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
  // Backing storage for a PartitionRefCount when the ref-count can't live in
  // the slot itself. We don't need to use PartitionRefCount directly, because:
  // - the ref count is used only when CanStoreRawSize() is true.
  // - we will construct PartitionRefCount when allocating memory (inside
  //   AllocFlagsNoHooks), not when allocating SubsequentPageMetadata.
  uint8_t ref_count_buffer[sizeof(base::internal::PartitionRefCount)];
#endif
};
// Each partition page has metadata associated with it. The metadata of the
// first page of a slot span, describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
template <bool thread_safe>
struct PartitionPage {
  // "Pack" the union so that slot_span_metadata_offset still fits within
  // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
  union __attribute__((packed)) {
    SlotSpanMetadata<thread_safe> slot_span_metadata;
    SubsequentPageMetadata subsequent_page_metadata;
    // sizeof(PartitionPage) must always be:
    // - a power of 2 (for fast modulo operations)
    // - below kPageMetadataSize
    //
    // This makes sure that this is respected no matter the architecture. The
    // padding leaves exactly sizeof(uint16_t) bytes for the offset below.
    char optional_padding[kPageMetadataSize - sizeof(uint16_t)];
  };
  // The first PartitionPage of the slot span holds its metadata. This offset
  // tells how many pages in from that first page we are.
  uint16_t slot_span_metadata_offset;
  ALWAYS_INLINE static PartitionPage* FromSlotStartPtr(void* slot_start);
  ALWAYS_INLINE static PartitionPage* FromSlotInnerPtr(void* ptr);

 private:
  ALWAYS_INLINE static void* ToSlotSpanStartPtr(const PartitionPage* page);
};
static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");
static_assert(sizeof(PartitionPage<NotThreadSafe>) == kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");
// Certain functions rely on PartitionPage being either SlotSpanMetadata or
// SubsequentPageMetadata, and therefore freely casting between each other.
static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
                  0,
              "");
static_assert(offsetof(PartitionPage<NotThreadSafe>, slot_span_metadata) == 0,
              "");
static_assert(offsetof(PartitionPage<NotThreadSafe>,
                       subsequent_page_metadata) == 0,
              "");
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
// ref_count_buffer is used as PartitionRefCount. We need to make the buffer
// alignof(base::internal::PartitionRefCount)-aligned. Otherwise, a
// misalignment crash will be observed.
static_assert(offsetof(SubsequentPageMetadata, ref_count_buffer) %
                      alignof(base::internal::PartitionRefCount) ==
                  0,
              "");
#endif
// Returns the metadata area of the super page |ptr| points at. The metadata
// area sits exactly one system page (the guard page) past the super page
// base.
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
  const uintptr_t base = reinterpret_cast<uintptr_t>(ptr);
  // |ptr| must be super-page-aligned.
  PA_DCHECK(!(base & kSuperPageOffsetMask));
  return reinterpret_cast<char*>(base + SystemPageSize());
}
// Size that should be reserved for 2 back-to-back quarantine bitmaps (if
// present) inside a super page. Elements of a super page are
// partition-page-aligned, hence the returned size is a multiple of partition
// page size.
ALWAYS_INLINE size_t ReservedQuarantineBitmapsSize() {
  const size_t unaligned_size = 2 * sizeof(QuarantineBitmap);
  // Round up to the next partition page boundary.
  return (unaligned_size + PartitionPageSize() - 1) & PartitionPageBaseMask();
}
// Size that should be committed for 2 back-to-back quarantine bitmaps (if
// present) inside a super page. It is a multiple of system page size.
ALWAYS_INLINE size_t CommittedQuarantineBitmapsSize() {
  const size_t unaligned_size = 2 * sizeof(QuarantineBitmap);
  // Round up to the next system page boundary.
  return (unaligned_size + SystemPageSize() - 1) & SystemPageBaseMask();
}
// Returns a pointer to the first of the two quarantine bitmaps in the super
// page. It's the caller's responsibility to ensure that the bitmaps even
// exist.
ALWAYS_INLINE QuarantineBitmap* SuperPageQuarantineBitmaps(
    char* super_page_base) {
  const uintptr_t base = reinterpret_cast<uintptr_t>(super_page_base);
  PA_DCHECK(!(base % kSuperPageAlignment));
  // The bitmaps sit right after the first (metadata/guard) partition page.
  char* bitmaps_begin = super_page_base + PartitionPageSize();
  return reinterpret_cast<QuarantineBitmap*>(bitmaps_begin);
}
// Returns the first byte of the super page usable for slot spans, skipping
// the leading partition page and, when present, the quarantine bitmaps.
ALWAYS_INLINE char* SuperPagePayloadBegin(char* super_page_base,
                                          bool with_quarantine) {
  PA_DCHECK(
      !(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
  char* payload = super_page_base + PartitionPageSize();
  if (with_quarantine)
    payload += ReservedQuarantineBitmapsSize();
  return payload;
}
// Returns one past the last usable byte of the super page; the trailing
// partition page is excluded.
ALWAYS_INLINE char* SuperPagePayloadEnd(char* super_page_base) {
  const uintptr_t base = reinterpret_cast<uintptr_t>(super_page_base);
  PA_DCHECK(!(base % kSuperPageAlignment));
  return reinterpret_cast<char*>(base + kSuperPageSize - PartitionPageSize());
}
// Tells whether |ptr| falls inside the payload region of its super page.
ALWAYS_INLINE bool IsWithinSuperPagePayload(char* ptr, bool with_quarantine) {
  PA_DCHECK(!IsManagedByPartitionAllocDirectMap(ptr));
  char* base = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(ptr) &
                                       kSuperPageBaseMask);
  char* begin = SuperPagePayloadBegin(base, with_quarantine);
  char* end = SuperPagePayloadEnd(base);
  return begin <= ptr && ptr < end;
}
// Returns the start of a slot or nullptr if |maybe_inner_ptr| is not inside of
// an existing slot span. The function may return a pointer even inside a
// decommitted or free slot span, it's the caller responsibility to check if
// memory is actually allocated.
// The precondition is that |maybe_inner_ptr| must point to payload of a valid
// super page.
template <bool thread_safe>
ALWAYS_INLINE char* GetSlotStartInSuperPage(char* maybe_inner_ptr) {
#if DCHECK_IS_ON()
  PA_DCHECK(!IsManagedByPartitionAllocDirectMap(maybe_inner_ptr));
  char* super_page_ptr = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(maybe_inner_ptr) & kSuperPageBaseMask);
  auto* extent = reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
      PartitionSuperPageToMetadataArea(super_page_ptr));
  PA_DCHECK(
      IsWithinSuperPagePayload(maybe_inner_ptr, extent->root->IsScanEnabled()));
#endif
  auto* slot_span =
      SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(maybe_inner_ptr);
  // Check if the slot span is actually used and valid. A null bucket means
  // this span's metadata was never set up.
  if (!slot_span->bucket)
    return nullptr;
#if DCHECK_IS_ON()
  PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
#endif
  char* const slot_span_begin = static_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  const ptrdiff_t ptr_offset = maybe_inner_ptr - slot_span_begin;
#if DCHECK_IS_ON()
  PA_DCHECK(0 <= ptr_offset &&
            ptr_offset < static_cast<ptrdiff_t>(
                             slot_span->bucket->get_pages_per_slot_span() *
                             PartitionPageSize()));
#endif
  // Slot span size in bytes is not necessarily multiple of partition page.
  // Pointers past get_bytes_per_span() land in unused tail space, not a slot.
  if (ptr_offset >=
      static_cast<ptrdiff_t>(slot_span->bucket->get_bytes_per_span()))
    return nullptr;
  // Round the offset down to a slot boundary to recover the slot start.
  const size_t slot_size = slot_span->bucket->slot_size;
  const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
  char* const result = slot_span_begin + (slot_number * slot_size);
  PA_DCHECK(result <= maybe_inner_ptr && maybe_inner_ptr < result + slot_size);
  return result;
}
// Converts from a pointer to the PartitionPage object (within super pages's
// metadata) into a pointer to the beginning of the slot span.
// |page| must be the first PartitionPage of the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionPage<thread_safe>::ToSlotSpanStartPtr(
    const PartitionPage* page) {
  // Only the first page of a span carries the span's metadata.
  PA_DCHECK(page->slot_span_metadata_offset == 0);
  const auto* metadata = &page->slot_span_metadata;
  return SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(metadata);
}
// Converts from a pointer inside a slot into a pointer to the PartitionPage
// object (within super pages's metadata) that describes the first partition
// page of a slot span containing that slot.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromSlotInnerPtr(void* ptr) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  char* super_page = reinterpret_cast<char*>(address & kSuperPageBaseMask);
  const uintptr_t page_index =
      (address & kSuperPageOffsetMask) >> PartitionPageShift();
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages.
  PA_DCHECK(page_index);
  PA_DCHECK(page_index < NumPartitionPagesPerSuperPage() - 1);
  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(
      PartitionSuperPageToMetadataArea(super_page) +
      (page_index << kPageMetadataShift));
  // Partition pages in the same slot span share the slot span metadata stored
  // in the span's first PartitionPage object; rewind to it.
  return page - page->slot_span_metadata_offset;
}
// Like |FromSlotInnerPtr|, but asserts that pointer points to the beginning of
// the slot.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromSlotStartPtr(void* slot_start) {
  auto* page = FromSlotInnerPtr(slot_start);
  // The distance from the span start must be a whole number of slots.
  const uintptr_t span_start =
      reinterpret_cast<uintptr_t>(ToSlotSpanStartPtr(page));
  const uintptr_t offset_in_span =
      reinterpret_cast<uintptr_t>(slot_start) - span_start;
  PA_DCHECK(!(offset_in_span % page->slot_span_metadata.bucket->slot_size));
  return page;
}
// Converts from a pointer to the SlotSpanMetadata object (within super pages's
// metadata) into a pointer to the beginning of the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
    const SlotSpanMetadata* slot_span) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(slot_span);
  const uintptr_t offset_in_super_page = address & kSuperPageOffsetMask;
  // A valid |slot_span| must be past the first guard System page and within
  // the following metadata region.
  PA_DCHECK(offset_in_super_page > SystemPageSize());
  // Must be less than total metadata region.
  PA_DCHECK(offset_in_super_page <
            SystemPageSize() +
                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
  const uintptr_t page_index =
      (offset_in_super_page - SystemPageSize()) >> kPageMetadataShift;
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages.
  PA_DCHECK(page_index);
  PA_DCHECK(page_index < NumPartitionPagesPerSuperPage() - 1);
  const uintptr_t super_page_base = address & kSuperPageBaseMask;
  return reinterpret_cast<void*>(super_page_base +
                                 (page_index << PartitionPageShift()));
}
// Converts from a pointer inside a slot into a pointer to the SlotSpanMetadata
// object (within super pages's metadata) that describes the slot span
// containing that slot.
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(void* ptr) {
  auto* first_page = PartitionPage<thread_safe>::FromSlotInnerPtr(ptr);
  // FromSlotInnerPtr() already rewound to the span's first page.
  PA_DCHECK(first_page->slot_span_metadata_offset == 0);
  return &first_page->slot_span_metadata;
}
// Like |FromSlotInnerPtr|, but asserts that pointer points to the beginning of
// the slot.
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotStartPtr(void* slot_start) {
  auto* first_page = PartitionPage<thread_safe>::FromSlotStartPtr(slot_start);
  // FromSlotStartPtr() already rewound to the span's first page.
  PA_DCHECK(first_page->slot_span_metadata_offset == 0);
  return &first_page->slot_span_metadata;
}
template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(size_t raw_size) {
  PA_DCHECK(CanStoreRawSize());
  // The raw size is stored in the metadata of the span's second partition
  // page (see SubsequentPageMetadata).
  auto* subsequent_page =
      reinterpret_cast<PartitionPage<thread_safe>*>(this) + 1;
  subsequent_page->subsequent_page_metadata.raw_size = raw_size;
}
template <bool thread_safe>
ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
  PA_DCHECK(CanStoreRawSize());
  // The raw size lives in the metadata of the span's second partition page
  // (see SubsequentPageMetadata).
  const auto* subsequent_page =
      reinterpret_cast<const PartitionPage<thread_safe>*>(this) + 1;
  return subsequent_page->subsequent_page_metadata.raw_size;
}
template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
    PartitionFreelistEntry* new_head) {
  // A non-null head must live in the same super page as this metadata.
  const uintptr_t this_super_page =
      reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
  const uintptr_t head_super_page =
      reinterpret_cast<uintptr_t>(new_head) & kSuperPageBaseMask;
  PA_DCHECK(!new_head || this_super_page == head_super_page);
  freelist_head = new_head;
}
// Returns |slot_start| to this span's freelist. Delegates to FreeSlowPath()
// when the allocated-slot count drops to zero or below; the returned
// DeferredUnmap must be Run() by the caller after releasing the lock.
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
SlotSpanMetadata<thread_safe>::Free(void* slot_start) {
#if DCHECK_IS_ON()
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif
  PA_DCHECK(num_allocated_slots);
  // Catches an immediate double free.
  PA_CHECK(slot_start != freelist_head);
  // Look for double free one level deeper in debug.
  PA_DCHECK(!freelist_head || slot_start != freelist_head->GetNext());
  // Push the freed slot onto the span's freelist.
  auto* entry = static_cast<internal::PartitionFreelistEntry*>(slot_start);
  entry->SetNext(freelist_head);
  SetFreelistHead(entry);
  --num_allocated_slots;
  if (UNLIKELY(num_allocated_slots <= 0)) {
    return FreeSlowPath();
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
  return {};
}
template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  // Active: some slots are allocated, and there is room for more, either on
  // the freelist or among not-yet-provisioned slots.
  if (num_allocated_slots <= 0)
    return false;
  return freelist_head || num_unprovisioned_slots;
}
template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  const bool full = (num_allocated_slots == bucket->get_slots_per_span());
  if (full) {
    // A full span can have neither free nor unprovisioned slots.
    PA_DCHECK(!freelist_head);
    PA_DCHECK(!num_unprovisioned_slots);
  }
  return full;
}
template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  // Empty: nothing allocated, but the freelist still exists (memory is still
  // committed, unlike a decommitted span).
  return num_allocated_slots == 0 && freelist_head != nullptr;
}
template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  // Decommitted: nothing allocated and no freelist either.
  const bool decommitted = !num_allocated_slots && !freelist_head;
  if (decommitted) {
    PA_DCHECK(!num_unprovisioned_slots);
    PA_DCHECK(empty_cache_index == -1);
  }
  return decommitted;
}
// Returns a decommitted slot span to a pristine state: every slot becomes
// unprovisioned and the span is detached from any successor.
template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
  PA_DCHECK(is_decommitted());
  num_unprovisioned_slots = bucket->get_slots_per_span();
  PA_DCHECK(num_unprovisioned_slots);
  next_slot_span = nullptr;
}
ALWAYS_INLINE void DeferredUnmap::Run() {
  // Common case: nothing was deferred; avoid the out-of-line call.
  if (LIKELY(!ptr))
    return;
  Unmap();
}
// Selects one of the two quarantine bitmaps of a super page (see
// QuarantineBitmapFromPointer()).
enum class QuarantineBitmapType { kMutator, kScanner };
// Picks the quarantine bitmap for |ptr|'s super page: the mutator and the
// scanner use opposite halves of the bitmap pair, and the roles swap every
// epoch.
ALWAYS_INLINE QuarantineBitmap* QuarantineBitmapFromPointer(
    QuarantineBitmapType type,
    size_t pcscan_epoch,
    void* ptr) {
  PA_DCHECK(!IsManagedByPartitionAllocDirectMap(ptr));
  auto* super_page_base = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask);
  QuarantineBitmap* bitmaps = SuperPageQuarantineBitmaps(super_page_base);
  size_t index = pcscan_epoch & 1;
  if (type == QuarantineBitmapType::kScanner)
    index ^= 1;
  return bitmaps + index;
}
// Iterates over all active and full slot spans in a super-page. Returns number
// of the visited slot spans. |Callback| must return a bool indicating whether
// the slot was visited (true) or skipped (false).
template <bool thread_safe, typename Callback>
size_t IterateSlotSpans(char* super_page_base,
                        bool with_quarantine,
                        Callback callback) {
#if DCHECK_IS_ON()
  PA_DCHECK(
      !(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
  auto* extent_entry =
      reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
          PartitionSuperPageToMetadataArea(super_page_base));
  extent_entry->root->lock_.AssertAcquired();
#endif
  using Page = PartitionPage<thread_safe>;
  auto* const first_page = Page::FromSlotStartPtr(
      SuperPagePayloadBegin(super_page_base, with_quarantine));
  // Call FromSlotInnerPtr instead of FromSlotStartPtr, because this slot span
  // doesn't exist, hence its bucket isn't set up to properly assert the slot
  // start.
  auto* const last_page = Page::FromSlotInnerPtr(
      SuperPagePayloadEnd(super_page_base) - PartitionPageSize());
  size_t visited = 0;
  // A null bucket stops the walk (the span's metadata was never set up).
  // Stepping by get_pages_per_slot_span() jumps from one span's first page to
  // the next span's first page.
  for (auto* page = first_page;
       page <= last_page && page->slot_span_metadata.bucket;
       page += page->slot_span_metadata.bucket->get_pages_per_slot_span()) {
    auto* slot_span = &page->slot_span_metadata;
    if (callback(slot_span))
      ++visited;
  }
  return visited;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

View File

@ -0,0 +1,66 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/partition_alloc_buildflags.h"
namespace base {
namespace internal {
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
// TODO(tasak): Optimize this function. There's potential for optimization in
// all callers of |PartitionRefCountPointer| (apart from marking it
// |ALWAYS_INLINE|):
// - |AllocFlagsNoHooks| and |FreeNoHooksImmediate| already "know"
// |usable_size|.
// - |AcquireInternal| and | ReleaseInternal| know |slot_span| (through the
// inlined |PartitionAllocGetSlotStart|).
// Computes where the PartitionRefCount for |slot_start|'s allocation lives:
// at the end of the slot for small buckets, or in the subsequent page
// metadata for single-slot spans.
PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
  DCheckGetSlotOffsetIsZero(slot_start);
  // Layout inside the slot of small buckets:
  // |<---------------- slot size ----------------->|
  // |[cookie]|...data...|[empty]|[cookie]|[refcnt]|
  //  ^                                    ^
  //  slot_start                           partition_ref_count_ptr
  //
  // Layout inside the slot of single-slot spans (raw size is available):
  // |<---------------------- slot size ------------------------>|
  // |[cookie]|...data...|[cookie]|[refcnt_placeholder]|[unused]|
  //
  // Here the refcount is not stored in the slot (even though the space for it
  // is still reserved). Instead, the refcount is stored in the subsequent
  // page metadata.
  auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStartPtr(slot_start);
  PA_DCHECK(slot_span);
#if DCHECK_IS_ON()
  PartitionCookieCheckValue(slot_start);
#endif
  uint8_t* partition_ref_count_ptr;
  if (UNLIKELY(slot_span->CanStoreRawSize())) {
    // Single-slot span: use the buffer reserved in the second partition
    // page's metadata (SubsequentPageMetadata::ref_count_buffer).
    auto* the_next_page =
        reinterpret_cast<PartitionPage<ThreadSafe>*>(slot_span) + 1;
    partition_ref_count_ptr =
        the_next_page->subsequent_page_metadata.ref_count_buffer;
  } else {
    // Small bucket: the refcount occupies the last bytes of the slot.
    uint8_t* slot_start_ptr = reinterpret_cast<uint8_t*>(slot_start);
    size_t ref_count_offset =
        slot_span->bucket->slot_size - kInSlotRefCountBufferSize;
    partition_ref_count_ptr = slot_start_ptr + ref_count_offset;
  }
  PA_DCHECK(reinterpret_cast<uintptr_t>(partition_ref_count_ptr) %
                alignof(PartitionRefCount) ==
            0);
  return reinterpret_cast<PartitionRefCount*>(partition_ref_count_ptr);
}
#endif // BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
} // namespace internal
} // namespace base

View File

@ -0,0 +1,192 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#include <atomic>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/base_export.h"
#include "base/check_op.h"
#include "base/notreached.h"
#include "base/partition_alloc_buildflags.h"
#include "build/build_config.h"
namespace base {
namespace internal {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
// Special-purpose atomic reference count class used by BackupRefPtrImpl.
// The least significant bit of the count is reserved for tracking the liveness
// state of an allocation: it's set when the allocation is created and cleared
// on free(). So the count can be:
//
// 1 for an allocation that is just returned from Alloc()
// 2 * k + 1 for a "live" allocation with k references
// 2 * k for an allocation with k dangling references after Free()
//
// This protects against double-free's, as we check whether the reference count
// is odd in |ReleaseFromAllocator()|, and if not we have a double-free.
class BASE_EXPORT PartitionRefCount {
 public:
  PartitionRefCount();
  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required before
  // the memory gets freed, necessitating an acquire/release barrier before
  // freeing the memory.
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  ALWAYS_INLINE void Acquire() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif
    // Adding 2 keeps the liveness bit (bit 0) intact. The previous count must
    // be positive, i.e. the object hasn't been fully released yet.
    PA_CHECK(count_.fetch_add(2, std::memory_order_relaxed) > 0);
  }
  // Returns true if the allocation should be reclaimed.
  ALWAYS_INLINE bool Release() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif
    // An old value of 2 means this was the last dangling reference to an
    // already-freed allocation (liveness bit clear), so the slot can be
    // reclaimed now.
    if (count_.fetch_sub(2, std::memory_order_release) == 2) {
      // In most thread-safe reference count implementations, an acquire
      // barrier is required so that all changes made to an object from other
      // threads are visible to its destructor. In our case, the destructor
      // finishes before the final `Release` call, so it shouldn't be a problem.
      // However, we will keep it as a precautionary measure.
      std::atomic_thread_fence(std::memory_order_acquire);
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      // The allocation is about to get freed, so clear the cookie.
      brp_cookie_ = 0;
#endif
      return true;
    }
    return false;
  }
  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  ALWAYS_INLINE bool ReleaseFromAllocator() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif
    // Subtracting 1 clears the liveness bit (bit 0); it must have been set,
    // otherwise Free() was called twice on this allocation.
    int32_t old_count = count_.fetch_sub(1, std::memory_order_release);
    PA_CHECK(old_count & 1);  // double-free detection
    if (old_count == 1) {
      // No dangling references remain; reclaim immediately.
      std::atomic_thread_fence(std::memory_order_acquire);
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      // The allocation is about to get freed, so clear the cookie.
      brp_cookie_ = 0;
#endif
      return true;
    }
    return false;
  }
  // True when the allocation is live and has no references (count is just the
  // liveness bit; see the class comment for the encoding).
  ALWAYS_INLINE bool HasOneRef() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif
    return count_.load(std::memory_order_acquire) == 1;
  }
  // True when the liveness bit is set, i.e. the allocation hasn't been freed.
  ALWAYS_INLINE bool IsAlive() {
    bool alive = count_.load(std::memory_order_relaxed) & 1;
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    if (alive)
      CheckCookie();
#endif
    return alive;
  }

 private:
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  ALWAYS_INLINE void CheckCookie() {
    PA_CHECK(brp_cookie_ == CalculateCookie());
  }
  // The cookie is derived from |this|, so a match also validates the pointer
  // arithmetic that located this object.
  ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif  // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  // Starts at 1: liveness bit set, zero references (see the class comment).
  std::atomic<int32_t> count_{1};
};
// count_ starts at 1 via its in-class initializer (allocation alive, zero
// extra references); the debug cookie is set only when cookie checks are
// compiled in.
ALWAYS_INLINE PartitionRefCount::PartitionRefCount()
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    : brp_cookie_(CalculateCookie())
#endif
{
}
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
static_assert(base::kAlignment % alignof(PartitionRefCount) == 0,
              "kAlignment must be multiples of alignof(PartitionRefCount).");
// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
// The buffer sits at the end of the slot, so no front-of-slot adjustments are
// needed.
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;
// Out-of-line: the pointer computation needs the slot-span metadata (see
// partition_ref_count.cc).
BASE_EXPORT PartitionRefCount* PartitionRefCountPointer(void* slot_start);
#else  // BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = base::kAlignment;
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;
// This is for adjustment of pointers right past the allocation, which may point
// to the next slot. First subtract 1 to bring them to the intended slot, and
// only then we'll be able to find ref-count in that slot.
constexpr size_t kPartitionPastAllocationAdjustment = 1;
// In this configuration the ref-count sits at the very beginning of the slot,
// so the computation is trivial and fully inlined.
ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
  DCheckGetSlotOffsetIsZero(slot_start);
  return reinterpret_cast<PartitionRefCount*>(slot_start);
}
#endif  // BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
              "PartitionRefCount should fit into the in-slot buffer.");
#else  // BUILDFLAG(USE_BACKUP_REF_PTR)
// BackupRefPtr disabled: no in-slot buffer is reserved.
static constexpr size_t kInSlotRefCountBufferSize = 0;
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
// Number of bytes the in-slot ref-count buffer contributes to size
// adjustments.
constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_

View File

@ -0,0 +1,965 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/pcscan.h"
#include "base/bits.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "wow64apiset.h"
#endif
#if defined(OS_LINUX)
#include <pthread.h>
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif
namespace base {
namespace {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if defined(OS_LINUX)
// Pre-fork handler. NO_THREAD_SAFETY_ANALYSIS: acquires the locks and
// intentionally leaves them held; the matching unlocks happen in the
// post-fork handlers, so the analysis cannot pair them up.
void BeforeForkInParent() NO_THREAD_SAFETY_ANALYSIS {
  // Acquisition order: regular root, original root (if any), aligned root
  // (only when distinct from the regular one), then the thread cache
  // registry lock. ReleaseLocks() undoes these in reverse.
  auto* regular = internal::PartitionAllocMalloc::Allocator();
  regular->lock_.Lock();

  auto* original = internal::PartitionAllocMalloc::OriginalAllocator();
  if (original)
    original->lock_.Lock();

  auto* aligned = internal::PartitionAllocMalloc::AlignedAllocator();
  if (aligned != regular)
    aligned->lock_.Lock();

  internal::ThreadCacheRegistry::GetLock().Lock();
}
// Drops every lock taken by BeforeForkInParent(), in reverse acquisition
// order (not strictly required — there are no ordering dependencies — but
// tidy). NO_THREAD_SAFETY_ANALYSIS: releases locks acquired elsewhere.
void ReleaseLocks() NO_THREAD_SAFETY_ANALYSIS {
  internal::ThreadCacheRegistry::GetLock().Unlock();

  auto* regular = internal::PartitionAllocMalloc::Allocator();
  auto* aligned = internal::PartitionAllocMalloc::AlignedAllocator();
  if (aligned != regular)
    aligned->lock_.Unlock();

  auto* original = internal::PartitionAllocMalloc::OriginalAllocator();
  if (original)
    original->lock_.Unlock();

  regular->lock_.Unlock();
}
// Parent-side post-fork handler: restores allocator availability by
// releasing everything BeforeForkInParent() acquired.
void AfterForkInParent() {
  ReleaseLocks();
}
// Child-side post-fork handler: releases the locks inherited (held) across
// fork(), then reclaims thread-cache memory belonging to parent threads that
// do not exist in the child.
void AfterForkInChild() {
  ReleaseLocks();
  // Unsafe, as noted in the name. This is fine here however, since at this
  // point there is only one thread, this one (unless another post-fork()
  // handler created a thread, but it would have needed to allocate, which would
  // have deadlocked the process already).
  //
  // If we don't reclaim this memory, it is lost forever. Note that this is only
  // really an issue if we fork() a multi-threaded process without calling
  // exec() right away, which is discouraged.
  internal::ThreadCacheRegistry::Instance()
      .ForcePurgeAllThreadAfterForkUnsafe();
}
#endif // defined(OS_LINUX)
std::atomic<bool> g_global_init_called;
// One-time, process-wide setup for PartitionAlloc-as-malloc. Only the first
// caller does any work; concurrent callers simply return without blocking.
void PartitionAllocMallocInitOnce() {
  bool expected_not_called = false;
  // compare_exchange_strong() both checks and claims the "called" flag in a
  // single atomic step; losers of the race bail out immediately — there is no
  // need to wait for the winner to finish.
  if (!g_global_init_called.compare_exchange_strong(expected_not_called, true))
    return;
#if defined(OS_LINUX)
  // When fork() is called, only the current thread continues to execute in the
  // child process. If the lock is held, but *not* by this thread when fork() is
  // called, we have a deadlock.
  //
  // The "solution" here is to acquire the lock on the forking thread before
  // fork(), and keep it held until fork() is done, in the parent and the
  // child. To clean up memory, we also must empty the thread caches in the
  // child, which is easier, since no threads except for the current one are
  // running right after the fork().
  //
  // This is not perfect though, since:
  // - Multiple pre/post-fork() handlers can be registered, they are then run in
  //   LIFO order for the pre-fork handler, and FIFO order for the post-fork
  //   one. So unless we are the first to register a handler, if another handler
  //   allocates, then we deterministically deadlock.
  // - pthread handlers are *not* called when the application calls clone()
  //   directly, which is what Chrome does to launch processes.
  //
  // However, no perfect solution really exists to make threads + fork()
  // cooperate, but deadlocks are real (and fork() is used in DEATH_TEST()s),
  // and other malloc() implementations use the same techniques.
  const int atfork_result =
      pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
  PA_CHECK(atfork_result == 0);
#endif  // defined(OS_LINUX)
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} // namespace
namespace internal {
// Computes — and, when |discard| is true, actually releases — the bytes in
// |slot_span| that can be handed back to the OS: the unused tail of a
// raw-size (single-slot) span, never-used trailing slots (whose freelist
// entries get rewritten), and whole system pages inside free slots that don't
// overlap a freelist pointer or a neighboring slot.
// Returns the number of discardable bytes found.
template <bool thread_safe>
static size_t PartitionPurgeSlotSpan(
    internal::SlotSpanMetadata<thread_safe>* slot_span,
    bool discard) {
  const internal::PartitionBucket<thread_safe>* bucket = slot_span->bucket;
  size_t slot_size = bucket->slot_size;
  // Slots smaller than a system page can never free a whole page this way,
  // and a span with no live allocations is handled by decommit instead.
  if (slot_size < SystemPageSize() || !slot_span->num_allocated_slots)
    return 0;
  size_t bucket_num_slots = bucket->get_slots_per_span();
  size_t discardable_bytes = 0;
  // Spans that record their exact requested (raw) size hold a single slot,
  // so the discardable region is simply everything past the last used
  // system page.
  if (slot_span->CanStoreRawSize()) {
    uint32_t used_bytes =
        static_cast<uint32_t>(RoundUpToSystemPage(slot_span->GetRawSize()));
    discardable_bytes = bucket->slot_size - used_bytes;
    if (discardable_bytes && discard) {
      char* ptr = reinterpret_cast<char*>(
          internal::SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
              slot_span));
      ptr += used_bytes;
      DiscardSystemPages(ptr, discardable_bytes);
    }
    return discardable_bytes;
  }
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
  constexpr size_t kMaxSlotCount =
      (PartitionPageSize() * kMaxPartitionPagesPerSlotSpan) / SystemPageSize();
#elif defined(OS_APPLE)
  // It's better for slot_usage to be stack-allocated and fixed-size, which
  // demands that its size be constexpr. On OS_APPLE, PartitionPageSize() is
  // always SystemPageSize() << 2, so regardless of what the run time page size
  // is, kMaxSlotCount can always be simplified to this expression.
  constexpr size_t kMaxSlotCount = 4 * kMaxPartitionPagesPerSlotSpan;
  PA_CHECK(kMaxSlotCount ==
           (PartitionPageSize() * kMaxPartitionPagesPerSlotSpan) /
               SystemPageSize());
#endif
  PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
  PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
  size_t num_slots = bucket_num_slots - slot_span->num_unprovisioned_slots;
  char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
  // The last freelist entry should not be discarded when using OS_WIN.
  // DiscardVirtualMemory makes the contents of discarded memory undefined.
  size_t last_slot = static_cast<size_t>(-1);
#endif
  memset(slot_usage, 1, num_slots);
  char* ptr = reinterpret_cast<char*>(
      internal::SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  // First, walk the freelist for this slot span and make a bitmap of which
  // slots are not in use.
  for (internal::PartitionFreelistEntry* entry = slot_span->freelist_head;
       entry;
       /**/) {
    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
    PA_DCHECK(slot_index < num_slots);
    slot_usage[slot_index] = 0;
    entry = entry->GetNext();
#if !defined(OS_WIN)
    // If we have a slot where the masked freelist entry is 0, we can actually
    // discard that freelist entry because touching a discarded page is
    // guaranteed to return original content or 0. (Note that this optimization
    // won't fire on big-endian machines because the masking function is
    // negation.)
    if (!internal::PartitionFreelistEntry::Encode(entry))
      last_slot = slot_index;
#endif
  }
  // If the slot(s) at the end of the slot span are not in used, we can truncate
  // them entirely and rewrite the freelist.
  size_t truncated_slots = 0;
  while (!slot_usage[num_slots - 1]) {
    truncated_slots++;
    num_slots--;
    // The span has live allocations (checked above), so this loop must stop
    // before consuming every slot.
    PA_DCHECK(num_slots);
  }
  // First, do the work of calculating the discardable bytes. Don't actually
  // discard anything unless the discard flag was passed in.
  if (truncated_slots) {
    size_t unprovisioned_bytes = 0;
    char* begin_ptr = ptr + (num_slots * slot_size);
    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    // We round the end pointer here up and not down because we're at the end of
    // a slot span, so we "own" all the way up the page boundary.
    end_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    PA_DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
    if (begin_ptr < end_ptr) {
      unprovisioned_bytes = end_ptr - begin_ptr;
      discardable_bytes += unprovisioned_bytes;
    }
    if (unprovisioned_bytes && discard) {
      PA_DCHECK(truncated_slots > 0);
      size_t num_new_entries = 0;
      slot_span->num_unprovisioned_slots +=
          static_cast<uint16_t>(truncated_slots);
      // Rewrite the freelist.
      internal::PartitionFreelistEntry* head = nullptr;
      internal::PartitionFreelistEntry* back = head;
      for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
        if (slot_usage[slot_index])
          continue;
        auto* entry = new (ptr + (slot_size * slot_index))
            internal::PartitionFreelistEntry();
        if (!head) {
          head = entry;
          back = entry;
        } else {
          back->SetNext(entry);
          back = entry;
        }
        num_new_entries++;
#if !defined(OS_WIN)
        last_slot = slot_index;
#endif
      }
      slot_span->SetFreelistHead(head);
      PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
      // Discard the memory.
      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
    }
  }
  // Next, walk the slots and for any not in use, consider where the system page
  // boundaries occur. We can release any system pages back to the system as
  // long as we don't interfere with a freelist pointer or an adjacent slot.
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;
    // The first address we can safely discard is just after the freelist
    // pointer. There's one quirk: if the freelist pointer is actually nullptr,
    // we can discard that pointer value too.
    char* begin_ptr = ptr + (i * slot_size);
    char* end_ptr = begin_ptr + slot_size;
#if !defined(OS_WIN)
    if (i != last_slot)
      begin_ptr += sizeof(internal::PartitionFreelistEntry);
#else
    begin_ptr += sizeof(internal::PartitionFreelistEntry);
#endif
    begin_ptr = reinterpret_cast<char*>(
        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
    end_ptr = reinterpret_cast<char*>(
        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
    if (begin_ptr < end_ptr) {
      size_t partial_slot_bytes = end_ptr - begin_ptr;
      discardable_bytes += partial_slot_bytes;
      if (discard)
        DiscardSystemPages(begin_ptr, partial_slot_bytes);
    }
  }
  return discardable_bytes;
}
// Purges (with discard) every slot span on |bucket|'s active list. The
// sentinel head means "no active spans", in which case there is nothing to
// walk.
template <bool thread_safe>
static void PartitionPurgeBucket(
    internal::PartitionBucket<thread_safe>* bucket) {
  auto* sentinel =
      internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  if (bucket->active_slot_spans_head == sentinel)
    return;
  for (auto* span = bucket->active_slot_spans_head; span;
       span = span->next_slot_span) {
    // A non-sentinel head implies the sentinel never appears mid-list.
    PA_DCHECK(span != sentinel);
    PartitionPurgeSlotSpan(span, true);
  }
}
// Accumulates the memory statistics of one slot span into |stats_out|.
// Decommitted spans only bump a counter; all other spans contribute to the
// discardable / resident / active byte totals and the per-state span counts.
template <bool thread_safe>
static void PartitionDumpSlotSpanStats(
    PartitionBucketMemoryStats* stats_out,
    internal::SlotSpanMetadata<thread_safe>* slot_span) {
  uint16_t slots_per_span = slot_span->bucket->get_slots_per_span();

  if (slot_span->is_decommitted()) {
    ++stats_out->num_decommitted_slot_spans;
    return;
  }

  // Resident bytes: everything that has been provisioned, rounded up to a
  // whole system page.
  size_t resident_in_span = RoundUpToSystemPage(
      (slots_per_span - slot_span->num_unprovisioned_slots) *
      stats_out->bucket_slot_size);
  stats_out->resident_bytes += resident_in_span;

  // A dry-run purge (discard == false) reports how many bytes *could* be
  // released without actually releasing them.
  stats_out->discardable_bytes += PartitionPurgeSlotSpan(slot_span, false);

  // Active bytes: the exact raw size when the span records one, otherwise
  // the number of live slots times the slot size.
  if (slot_span->CanStoreRawSize()) {
    stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
  } else {
    stats_out->active_bytes +=
        (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
  }

  if (slot_span->is_empty()) {
    // Empty spans could be decommitted wholesale.
    stats_out->decommittable_bytes += resident_in_span;
    ++stats_out->num_empty_slot_spans;
  } else if (slot_span->is_full()) {
    ++stats_out->num_full_slot_spans;
  } else {
    PA_DCHECK(slot_span->is_active());
    ++stats_out->num_active_slot_spans;
  }
}
// Fills |stats_out| with aggregated statistics for |bucket|. Leaves is_valid
// false (and returns early) for buckets with no slot spans in any state, so
// callers can skip reporting them entirely.
template <bool thread_safe>
static void PartitionDumpBucketStats(
    PartitionBucketMemoryStats* stats_out,
    const internal::PartitionBucket<thread_safe>* bucket) {
  PA_DCHECK(!bucket->is_direct_mapped());
  stats_out->is_valid = false;
  // If the active slot span list is empty (==
  // internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
  // still need to be reported if it has a list of empty, decommitted or full
  // slot spans.
  if (bucket->active_slot_spans_head ==
          internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() &&
      !bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
      !bucket->num_full_slot_spans)
    return;
  memset(stats_out, '\0', sizeof(*stats_out));
  stats_out->is_valid = true;
  stats_out->is_direct_map = false;
  stats_out->num_full_slot_spans =
      static_cast<size_t>(bucket->num_full_slot_spans);
  stats_out->bucket_slot_size = bucket->slot_size;
  uint16_t bucket_num_slots = bucket->get_slots_per_span();
  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
  // Full slot spans are not kept on any of the lists walked below, so their
  // contribution is accounted for up front from the running counter.
  stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
  stats_out->resident_bytes =
      bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
  for (internal::SlotSpanMetadata<thread_safe>* slot_span =
           bucket->empty_slot_spans_head;
       slot_span; slot_span = slot_span->next_slot_span) {
    PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
    PartitionDumpSlotSpanStats(stats_out, slot_span);
  }
  for (internal::SlotSpanMetadata<thread_safe>* slot_span =
           bucket->decommitted_slot_spans_head;
       slot_span; slot_span = slot_span->next_slot_span) {
    PA_DCHECK(slot_span->is_decommitted());
    PartitionDumpSlotSpanStats(stats_out, slot_span);
  }
  if (bucket->active_slot_spans_head !=
      internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
    for (internal::SlotSpanMetadata<thread_safe>* slot_span =
             bucket->active_slot_spans_head;
         slot_span; slot_span = slot_span->next_slot_span) {
      PA_DCHECK(
          slot_span !=
          internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
      PartitionDumpSlotSpanStats(stats_out, slot_span);
    }
  }
}
#if DCHECK_IS_ON()
// Debug-only helper (the enclosing #if compiles it only when DCHECK_IS_ON()):
// DCHECK-fails if |ptr| is not managed by PartitionAlloc's normal buckets.
void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr) {
  PA_DCHECK(IsManagedByPartitionAllocNormalBuckets(ptr));
}
#endif
} // namespace internal
// Terminal handler for an allocation failure of |size| bytes. On 32-bit
// platforms it first records address-space diagnostics useful for crash
// triage, then invokes the embedder's OOM callback (if registered) and
// crashes. Never returns.
template <bool thread_safe>
[[noreturn]] NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(
    size_t size) {
#if !defined(ARCH_CPU_64_BITS)
  // On 32-bit targets, address space (not RAM) is the likely scarce resource:
  // tally how much this root holds, committed or not.
  const size_t virtual_address_space_size =
      total_size_of_super_pages.load(std::memory_order_relaxed) +
      total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
  const size_t uncommitted_size =
      virtual_address_space_size -
      total_size_of_committed_pages.load(std::memory_order_relaxed);
  // Check whether this OOM is due to a lot of super pages that are allocated
  // but not committed, probably due to http://crbug.com/421387.
  if (uncommitted_size > kReasonableSizeOfUnusedPages) {
    internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
  }
#if defined(OS_WIN)
  // If true then we are running on 64-bit Windows.
  BOOL is_wow_64 = FALSE;
  // Intentionally ignoring failures.
  IsWow64Process(GetCurrentProcess(), &is_wow_64);
  // 32-bit address space on Windows is typically either 2 GiB (on 32-bit
  // Windows) or 4 GiB (on 64-bit Windows). 2.8 and 1.0 GiB are just rough
  // guesses as to how much address space PA can consume (note that code,
  // stacks, and other allocators will also consume address space).
  const size_t kReasonableVirtualSize = (is_wow_64 ? 2800 : 1024) * 1024 * 1024;
  // Make it obvious whether we are running on 64-bit Windows.
  base::debug::Alias(&is_wow_64);
#else
  constexpr size_t kReasonableVirtualSize =
      // 1.5GiB elsewhere, since address space is typically 3GiB.
      (1024 + 512) * 1024 * 1024;
#endif
  if (virtual_address_space_size > kReasonableVirtualSize) {
    internal::PartitionOutOfMemoryWithLargeVirtualSize(
        virtual_address_space_size);
  }
  // Make the virtual size visible to crash reports all the time.
  base::debug::Alias(&virtual_address_space_size);
#endif
  if (internal::g_oom_handling_function)
    (*internal::g_oom_handling_function)(size);
  OOM_CRASH(size);
}
// Drains the ring of cached empty slot spans: each tracked span is
// decommitted (when possible) and its ring entry cleared.
template <bool thread_safe>
void PartitionRoot<thread_safe>::DecommitEmptySlotSpans() {
  for (SlotSpan*& entry : global_empty_slot_span_ring) {
    if (entry != nullptr)
      entry->DecommitIfPossible(this);
    entry = nullptr;
  }
}
// One-time initialization of this root: reserves GigaCage address space when
// applicable, configures per-slot extras (cookie / ref-count), initializes
// every bucket, and optionally enables the thread cache. Idempotent — returns
// early (under the lock) if the root is already initialized.
template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
  {
    ScopedGuard guard{lock_};
    if (initialized)
      return;
#if defined(PA_HAS_64_BITS_POINTERS)
    // Reserve address space for partition alloc.
    if (features::IsPartitionAllocGigaCageEnabled())
      internal::PartitionAddressSpace::Init();
#endif
    // If alignment needs to be enforced, disallow adding a cookie and/or
    // ref-count at the beginning of the slot.
    if (opts.alignment == PartitionOptions::Alignment::kAlignedAlloc) {
      allow_cookies = false;
      allow_ref_count = false;
      // There should be no configuration where aligned root and ref-count are
      // requested at the same time. In theory REF_COUNT_AT_END_OF_ALLOCATION
      // allows these to co-exist, but in this case aligned root is not even
      // created.
      PA_CHECK(opts.ref_count == PartitionOptions::RefCount::kDisabled);
    } else {
      allow_cookies = true;
      // Allow ref-count if it's explicitly requested *and* GigaCage is enabled.
      // Without GigaCage it'd be unused, thus wasteful.
      allow_ref_count =
          (opts.ref_count == PartitionOptions::RefCount::kEnabled) &&
          features::IsPartitionAllocGigaCageEnabled();
    }
#if PARTITION_EXTRAS_REQUIRED
    // Extras are the hidden bytes added around each allocation; the offset
    // tracks where the user-visible region begins within the slot.
    extras_size = 0;
    extras_offset = 0;
    if (allow_cookies) {
      extras_size += internal::kPartitionCookieSizeAdjustment;
      extras_offset += internal::kPartitionCookieOffsetAdjustment;
    }
    if (allow_ref_count) {
      // TODO(tasak): In the REF_COUNT_AT_END_OF_ALLOCATION case, ref-count is
      // stored out-of-line for single-slot slot spans, so no need to
      // add/subtract its size in this case.
      extras_size += internal::kPartitionRefCountSizeAdjustment;
      extras_offset += internal::kPartitionRefCountOffsetAdjustment;
    }
#endif
    quarantine_mode =
#if PA_ALLOW_PCSCAN
        (opts.quarantine == PartitionOptions::Quarantine::kDisallowed
             ? QuarantineMode::kAlwaysDisabled
             : QuarantineMode::kDisabledByDefault);
#else
        QuarantineMode::kAlwaysDisabled;
#endif
    // We mark the sentinel slot span as free to make sure it is skipped by our
    // logic to find a new active slot span.
    memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
    sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
    // This is a "magic" value so we can test if a root pointer is valid.
    inverted_self = ~reinterpret_cast<uintptr_t>(this);
    // Set up the actual usable buckets first.
    // Note that typical values (i.e. min allocation size of 8) will result in
    // pseudo buckets (size==9 etc. or more generally, size is not a multiple
    // of the smallest allocation granularity).
    // We avoid them in the bucket lookup map, but we tolerate them to keep the
    // code simpler and the structures more generic.
    size_t i, j;
    size_t current_size = kSmallestBucket;
    size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
    Bucket* bucket = &buckets[0];
    for (i = 0; i < kNumBucketedOrders; ++i) {
      for (j = 0; j < kNumBucketsPerOrder; ++j) {
        bucket->Init(current_size);
        // Disable pseudo buckets so that touching them faults.
        if (current_size % kSmallestBucket)
          bucket->active_slot_spans_head = nullptr;
        current_size += current_increment;
        ++bucket;
      }
      // Bucket sizes double each order, so the per-bucket step doubles too.
      current_increment <<= 1;
    }
    PA_DCHECK(current_size == 1 << kMaxBucketedOrder);
    PA_DCHECK(bucket == &buckets[0] + kNumBuckets);
#if !defined(PA_THREAD_CACHE_SUPPORTED)
    // TLS in ThreadCache not supported on other OSes.
    with_thread_cache = false;
#else
    internal::ThreadCache::EnsureThreadSpecificDataInitialized();
    with_thread_cache =
        (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
    if (with_thread_cache)
      internal::ThreadCache::Init(this);
#endif  // !defined(PA_THREAD_CACHE_SUPPORTED)
    initialized = true;
  }
  // Called without the lock, might allocate.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  PartitionAllocMallocInitOnce();
#endif
}
// Destroying a root that still has a live thread cache would leave dangling
// per-thread state, so that configuration is forbidden when PartitionAlloc
// backs malloc().
template <bool thread_safe>
PartitionRoot<thread_safe>::~PartitionRoot() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  PA_CHECK(!with_thread_cache)
      << "Must not destroy a partition with a thread cache";
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
// Syncs this root's lazy-commit setting with the feature flag (Windows only).
// Lazy commit may only be turned off, never back on; when it is turned off,
// every partially-committed slot span must be committed up front, which is
// what the super-page walk below does.
template <bool thread_safe>
void PartitionRoot<thread_safe>::ConfigureLazyCommit() {
#if defined(OS_WIN)
  bool new_value =
      base::FeatureList::IsEnabled(features::kPartitionAllocLazyCommit);
  internal::ScopedGuard<thread_safe> guard{lock_};
  if (use_lazy_commit != new_value) {
    // Lazy commit can be turned off, but turning on isn't supported.
    PA_DCHECK(use_lazy_commit);
    use_lazy_commit = new_value;
    // Visit every slot span in every super page owned by this root.
    for (auto* super_page_extent = first_extent; super_page_extent;
         super_page_extent = super_page_extent->next) {
      for (char* super_page = super_page_extent->super_page_base;
           super_page != super_page_extent->super_pages_end;
           super_page += kSuperPageSize) {
        internal::IterateSlotSpans<thread_safe>(
            super_page, IsQuarantineAllowed(),
            // Returns whether the span needed (and received) a recommit.
            [this](SlotSpan* slot_span) -> bool {
              lock_.AssertAcquired();
              size_t provisioned_size = slot_span->GetProvisionedSize();
              size_t size_to_commit = slot_span->bucket->get_bytes_per_span();
              if (slot_span->is_decommitted()) {
                return false;
              }
              if (slot_span->is_full()) {
                // Full spans are already committed end-to-end.
                PA_DCHECK(provisioned_size == size_to_commit);
                return false;
              }
              PA_DCHECK(size_to_commit % SystemPageSize() == 0);
              size_t already_committed_size =
                  bits::AlignUp(provisioned_size, SystemPageSize());
              // Free & decommitted slot spans are skipped.
              PA_DCHECK(already_committed_size > 0);
              if (size_to_commit > already_committed_size) {
                // Commit the remaining tail of the span in one call.
                char* slot_span_start = reinterpret_cast<char*>(
                    SlotSpan::ToSlotSpanStartPtr(slot_span));
                RecommitSystemPagesForData(
                    slot_span_start + already_committed_size,
                    size_to_commit - already_committed_size,
                    PageUpdatePermissions);
              }
              return true;
            });
      }
    }
  }
#endif  // defined(OS_WIN)
}
// Attempts to satisfy a realloc of a direct-mapped allocation without moving
// it, by growing or shrinking the committed region inside the existing
// mapping. Returns false when the caller must fall back to
// allocate-copy-free: the new size drops below the direct-map threshold, a
// shrink would strand too much address space, or a grow exceeds the mapping.
template <bool thread_safe>
bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
    internal::SlotSpanMetadata<thread_safe>* slot_span,
    size_t requested_size) {
  PA_DCHECK(slot_span->bucket->is_direct_mapped());
  size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
  // Note that the new size isn't a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation.
  size_t new_slot_size = GetDirectMapSlotSize(raw_size);
  if (new_slot_size < kMinDirectMappedDownsize)
    return false;
  // bucket->slot_size is the current size of the allocation.
  size_t current_slot_size = slot_span->bucket->slot_size;
  char* slot_start =
      static_cast<char*>(SlotSpan::ToSlotSpanStartPtr(slot_span));
  if (new_slot_size == current_slot_size) {
    // No need to move any memory around, but update size and cookie below.
    // That's because raw_size may have changed.
  } else if (new_slot_size < current_slot_size) {
    size_t current_map_size =
        DirectMapExtent::FromSlotSpan(slot_span)->map_size;
    size_t new_map_size = GetDirectMapReservedSize(raw_size) -
                          GetDirectMapMetadataAndGuardPagesSize();
    // Don't reallocate in-place if new map size would be less than 80 % of the
    // current map size, to avoid holding on to too much unused address space.
    if ((new_map_size / SystemPageSize()) * 5 <
        (current_map_size / SystemPageSize()) * 4)
      return false;
    // Shrink by decommitting unneeded pages and making them inaccessible.
    size_t decommit_size = current_slot_size - new_slot_size;
    DecommitSystemPagesForData(slot_start + new_slot_size, decommit_size,
                               PageUpdatePermissions);
  } else if (new_slot_size <=
             DirectMapExtent::FromSlotSpan(slot_span)->map_size) {
    // Grow within the actually allocated memory. Just need to make the
    // pages accessible again.
    size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
    RecommitSystemPagesForData(slot_start + current_slot_size,
                               recommit_slot_size_growth,
                               PageUpdatePermissions);
#if DCHECK_IS_ON()
    // Poison the newly-recommitted bytes so reads of uninitialized memory
    // stand out in debug builds.
    memset(slot_start + current_slot_size, kUninitializedByte,
           recommit_slot_size_growth);
#endif
  } else {
    // We can't perform the realloc in-place.
    // TODO: support this too when possible.
    return false;
  }
  slot_span->SetRawSize(raw_size);
  slot_span->bucket->slot_size = new_slot_size;
#if DCHECK_IS_ON()
  // Write a new trailing cookie.
  if (allow_cookies) {
    char* user_data_start =
        static_cast<char*>(AdjustPointerForExtrasAdd(slot_start));
    size_t usable_size = slot_span->GetUsableSize(this);
    internal::PartitionCookieWriteValue(user_data_start + usable_size);
  }
#endif
  return true;
}
// Attempts a realloc without moving the allocation: succeeds iff |new_size|
// maps to the same underlying capacity the slot already provides. On success
// the recorded raw size (and, in DCHECK builds, the trailing cookie) is
// refreshed and the function returns true; on failure it returns false and
// the caller must allocate-copy-free.
template <bool thread_safe>
bool PartitionRoot<thread_safe>::TryReallocInPlace(void* ptr,
                                                   SlotSpan* slot_span,
                                                   size_t new_size) {
  // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
  // new size is a significant percentage smaller. We could do the same if we
  // determine it is a win.
  if (AllocationCapacityFromRequestedSize(new_size) !=
      AllocationCapacityFromPtr(ptr))
    return false;
  // Trying to allocate |new_size| would use the same amount of
  // underlying memory as we're already using, so re-use the allocation
  // after updating statistics (and cookies, if present).
  if (slot_span->CanStoreRawSize()) {
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION) && DCHECK_IS_ON()
    // In this configuration the ref-count position depends on the raw size;
    // capture it before the update so we can verify it did not move.
    void* slot_start = AdjustPointerForExtrasSubtract(ptr);
    internal::PartitionRefCount* old_ref_count;
    if (allow_ref_count) {
      PA_DCHECK(features::IsPartitionAllocGigaCageEnabled());
      old_ref_count = internal::PartitionRefCountPointer(slot_start);
    }
#endif  // BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION)
    size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
    slot_span->SetRawSize(new_raw_size);
#if BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION) && DCHECK_IS_ON()
    if (allow_ref_count) {
      internal::PartitionRefCount* new_ref_count =
          internal::PartitionRefCountPointer(slot_start);
      PA_DCHECK(new_ref_count == old_ref_count);
    }
#endif  // BUILDFLAG(REF_COUNT_AT_END_OF_ALLOCATION) && DCHECK_IS_ON()
#if DCHECK_IS_ON()
    // Write a new trailing cookie only when it is possible to keep track
    // raw size (otherwise we wouldn't know where to look for it later).
    if (allow_cookies) {
      size_t usable_size = slot_span->GetUsableSize(this);
      internal::PartitionCookieWriteValue(static_cast<char*>(ptr) +
                                          usable_size);
    }
#endif
  }
  // Previously written as `return ptr;`, relying on an implicit pointer-to-
  // bool conversion. Make the conversion explicit; behavior is unchanged
  // (|ptr| is non-null here, since callers handle the null case first).
  return ptr != nullptr;
}
// realloc() implementation with flags. Handles the degenerate cases first
// (null |ptr| behaves as malloc, zero |new_size| behaves as free), then
// attempts in-place resizing (direct-map resize or same-capacity reuse)
// before falling back to allocate-copy-free. |ptr| may belong to a different
// root than |this|; the owning root is recovered from the slot span.
template <bool thread_safe>
void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
                                               void* ptr,
                                               size_t new_size,
                                               const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Under memory tools (e.g. ASan), delegate to the system realloc so the
  // tool can track the allocation.
  CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  void* result = realloc(ptr, new_size);
  PA_CHECK(result || flags & PartitionAllocReturnNull);
  return result;
#else
  bool no_hooks = flags & PartitionAllocNoHooks;
  if (UNLIKELY(!ptr)) {
    return no_hooks ? AllocFlagsNoHooks(flags, new_size)
                    : AllocFlags(flags, new_size, type_name);
  }
  if (UNLIKELY(!new_size)) {
    Free(ptr);
    return nullptr;
  }
  if (new_size > MaxDirectMapped()) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize(new_size);
  }
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  bool overridden = false;
  size_t old_usable_size;
  if (UNLIKELY(!no_hooks && hooks_enabled)) {
    overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
        &old_usable_size, ptr);
  }
  if (LIKELY(!overridden)) {
    // |ptr| may have been allocated in another root.
    SlotSpan* slot_span = SlotSpan::FromSlotInnerPtr(ptr);
    auto* old_root = PartitionRoot::FromSlotSpan(slot_span);
    bool success = false;
    {
      internal::ScopedGuard<thread_safe> guard{old_root->lock_};
      // TODO(palmer): See if we can afford to make this a CHECK.
      PA_DCHECK(IsValidSlotSpan(slot_span));
      old_usable_size = slot_span->GetUsableSize(old_root);
      if (UNLIKELY(slot_span->bucket->is_direct_mapped())) {
        // We may be able to perform the realloc in place by changing the
        // accessibility of memory pages and, if reducing the size, decommitting
        // them.
        success = old_root->ReallocDirectMappedInPlace(slot_span, new_size);
      }
    }
    if (success) {
      if (UNLIKELY(!no_hooks && hooks_enabled)) {
        PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
                                                          type_name);
      }
      return ptr;
    }
    if (old_root->TryReallocInPlace(ptr, slot_span, new_size))
      return ptr;
  }
  // This realloc cannot be resized in-place. Sadness.
  void* ret = no_hooks ? AllocFlagsNoHooks(flags, new_size)
                       : AllocFlags(flags, new_size, type_name);
  if (!ret) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize(new_size);
  }
  // Preserve the old contents, truncated to the smaller of the two sizes.
  memcpy(ret, ptr, std::min(old_usable_size, new_size));
  Free(ptr);
  return ret;
#endif
}
// Releases reclaimable memory according to |flags|: decommits cached empty
// slot spans and/or discards unused system pages in page-sized-or-larger
// buckets. Entirely skipped while a PCScan pass is running.
template <bool thread_safe>
void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
  ScopedGuard guard{lock_};
  // Avoid purging if there is PCScan task currently scheduled. Since pcscan
  // takes snapshot of all allocated pages, decommitting pages here (even
  // under the lock) is racy.
  // TODO(bikineev): Consider rescheduling the purging after PCScan.
  if (PCScan::Instance().IsInProgress())
    return;

  if (flags & PartitionPurgeDecommitEmptySlotSpans)
    DecommitEmptySlotSpans();

  if (!(flags & PartitionPurgeDiscardUnusedSystemPages))
    return;
  for (Bucket& bucket : buckets) {
    // Buckets smaller than a system page can never yield a full page.
    if (bucket.slot_size >= SystemPageSize())
      internal::PartitionPurgeBucket(&bucket);
  }
}
// Reports this root's memory statistics through |dumper|. Heap state is
// gathered while holding the lock; |dumper| is only invoked after the lock
// is released, since it may itself allocate. A light dump reports totals
// only; a full dump also reports per-bucket and per-direct-map entries.
template <bool thread_safe>
void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
                                           bool is_light_dump,
                                           PartitionStatsDumper* dumper) {
  static const size_t kMaxReportableDirectMaps = 4096;
  // Allocate on the heap rather than on the stack to avoid stack overflow
  // skirmishes (on Windows, in particular). Allocate before locking below,
  // otherwise when PartitionAlloc is malloc() we get reentrancy issues. This
  // inflates reported values a bit for detailed dumps though, by 16kiB.
  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
  if (!is_light_dump) {
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }
  PartitionBucketMemoryStats bucket_stats[kNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  PartitionMemoryStats stats = {0};
  // Collect data with the lock held, cannot allocate or call third-party code
  // below.
  {
    ScopedGuard guard{lock_};
    stats.total_mmapped_bytes =
        total_size_of_super_pages.load(std::memory_order_relaxed) +
        total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
    stats.total_committed_bytes =
        total_size_of_committed_pages.load(std::memory_order_relaxed);
    size_t direct_mapped_allocations_total_size = 0;
    for (size_t i = 0; i < kNumBuckets; ++i) {
      const Bucket* bucket = &bucket_at(i);
      // Don't report the pseudo buckets that the generic allocator sets up in
      // order to preserve a fast size->bucket map (see
      // PartitionRoot::Init() for details).
      if (!bucket->active_slot_spans_head)
        bucket_stats[i].is_valid = false;
      else
        internal::PartitionDumpBucketStats(&bucket_stats[i], bucket);
      if (bucket_stats[i].is_valid) {
        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
        stats.total_active_bytes += bucket_stats[i].active_bytes;
        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
      }
    }
    // NOTE: the walk stops after kMaxReportableDirectMaps extents; any
    // direct-mapped allocations past that cap are not included in the totals.
    for (DirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      PA_DCHECK(!extent->next_extent ||
                extent->next_extent->prev_extent == extent);
      size_t slot_size = extent->bucket->slot_size;
      direct_mapped_allocations_total_size += slot_size;
      if (is_light_dump)
        continue;
      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
    }
    stats.total_resident_bytes += direct_mapped_allocations_total_size;
    stats.total_active_bytes += direct_mapped_allocations_total_size;
    stats.has_thread_cache = with_thread_cache;
    if (stats.has_thread_cache) {
      internal::ThreadCacheRegistry::Instance().DumpStats(
          true, &stats.current_thread_cache_stats);
      internal::ThreadCacheRegistry::Instance().DumpStats(
          false, &stats.all_thread_caches_stats);
    }
  }
  // Do not hold the lock when calling |dumper|, as it may allocate.
  if (!is_light_dump) {
    for (auto& stat : bucket_stats) {
      if (stat.is_valid)
        dumper->PartitionsDumpBucketStats(partition_name, &stat);
    }
    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
      uint32_t size = direct_map_lengths[i];
      // Each direct map is reported as a fully-occupied single-slot bucket.
      PartitionBucketMemoryStats mapped_stats = {};
      mapped_stats.is_valid = true;
      mapped_stats.is_direct_map = true;
      mapped_stats.num_full_slot_spans = 1;
      mapped_stats.allocated_slot_span_size = size;
      mapped_stats.bucket_slot_size = size;
      mapped_stats.active_bytes = size;
      mapped_stats.resident_bytes = size;
      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
    }
  }
  dumper->PartitionDumpTotals(partition_name, &stats);
}
// Explicit instantiations: these are the only two PartitionRoot variants the
// rest of the codebase links against.
template struct BASE_EXPORT PartitionRoot<internal::ThreadSafe>;
template struct BASE_EXPORT PartitionRoot<internal::NotThreadSafe>;
// The two variants are asserted to share size and field offsets — presumably
// so code may treat roots uniformly regardless of the thread-safety
// parameter (TODO confirm against callers); the asserts turn any layout
// divergence into a compile error.
static_assert(sizeof(PartitionRoot<internal::ThreadSafe>) ==
                  sizeof(PartitionRoot<internal::NotThreadSafe>),
              "Layouts should match");
static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, buckets) ==
                  offsetof(PartitionRoot<internal::NotThreadSafe>, buckets),
              "Layouts should match");
static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
                  offsetof(PartitionRoot<internal::ThreadSafe>, buckets) +
                      kNumBuckets *
                          sizeof(PartitionRoot<internal::ThreadSafe>::Bucket),
              "sentinel_bucket must be just after the regular buckets.");
} // namespace base

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cstring>
#include "base/allocator/partition_allocator/partition_stats.h"
namespace base {
// Zero-initializes the captured stats so a dumper that never receives
// PartitionDumpTotals() still reports all-zero totals.
SimplePartitionStatsDumper::SimplePartitionStatsDumper() {
  memset(&stats_, 0, sizeof(stats_));
}
// Captures partition-wide totals by copying |memory_stats| into the member;
// |partition_name| is unused since this dumper tracks a single partition.
void SimplePartitionStatsDumper::PartitionDumpTotals(
    const char* partition_name,
    const PartitionMemoryStats* memory_stats) {
  stats_ = *memory_stats;
}
} // namespace base

Some files were not shown because too many files have changed in this diff Show More