Mirror of https://github.com/klzgrad/naiveproxy.git (synced 2024-11-28 00:06:09 +03:00)
Commit e5c6d01cba: Import chromium-95.0.4638.54
src/.clang-format (new file, 39 lines)
@@ -0,0 +1,39 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
# This defaults to 'Auto'. Explicitly set it for a while, so that
# 'vector<vector<int> >' in existing files gets formatted to
# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
# 'int>>' if the file already contains at least one such instance.)
Standard: Cpp11

# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()
#   IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
# IPC_END_MESSAGE_MAP()
# gets correctly indented.
MacroBlockBegin: "^\
BEGIN_MSG_MAP|\
BEGIN_MSG_MAP_EX|\
BEGIN_SAFE_MSG_MAP_EX|\
CR_BEGIN_MSG_MAP_EX|\
IPC_BEGIN_MESSAGE_MAP|\
IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
IPC_STRUCT_BEGIN|\
IPC_STRUCT_BEGIN_WITH_PARENT|\
IPC_STRUCT_TRAITS_BEGIN|\
POLPARAMS_BEGIN|\
PPAPI_BEGIN_MESSAGE_MAP$"
MacroBlockEnd: "^\
CR_END_MSG_MAP|\
END_MSG_MAP|\
IPC_END_MESSAGE_MAP|\
IPC_PROTOBUF_MESSAGE_TRAITS_END|\
IPC_STRUCT_END|\
IPC_STRUCT_TRAITS_END|\
POLPARAMS_END|\
PPAPI_END_MESSAGE_MAP$"

# TODO: Remove this once clang-format r357700 is rolled in.
JavaImportGroups: ['android', 'androidx', 'com', 'dalvik', 'junit', 'org', 'com.google.android.apps.chrome', 'org.chromium', 'java', 'javax']
src/.gitattributes (new file, vendored, 56 lines)
@@ -0,0 +1,56 @@
# Stop Windows python license check presubmit errors by forcing LF checkout.
*.py text eol=lf

# Force LF checkout of the pins files to avoid transport_security_state_generator errors.
/net/http/*.pins text eol=lf

# Force LF checkout for all source files
*.bin binary
*.c text eol=lf
*.cc text eol=lf
*.cpp text eol=lf
*.csv text eol=lf
*.grd text eol=lf
*.grdp text eol=lf
*.gn text eol=lf
*.gni text eol=lf
*.h text eol=lf
*.html text eol=lf
*.idl text eol=lf
*.in text eol=lf
*.inc text eol=lf
*.java text eol=lf
*.js text eol=lf
*.json text eol=lf
*.json5 text eol=lf
*.md text eol=lf
*.mm text eol=lf
*.mojom text eol=lf
*.pdf -diff
*.proto text eol=lf
*.sh text eol=lf
*.sql text eol=lf
*.txt text eol=lf
*.xml text eol=lf
*.xslt text eol=lf
.clang-format text eol=lf
.eslintrc.js text eol=lf
.git-blame-ignore-revs text eol=lf
.gitattributes text eol=lf
.gitignore text eol=lf
.vpython text eol=lf
codereview.settings text eol=lf
DEPS text eol=lf
ENG_REVIEW_OWNERS text eol=lf
LICENSE text eol=lf
LICENSE.* text eol=lf
MAJOR_BRANCH_DATE text eol=lf
OWNERS text eol=lf
README text eol=lf
README.* text eol=lf
WATCHLISTS text eol=lf
VERSION text eol=lf
DIR_METADATA text eol=lf

# Skip Tricium by default on files in third_party.
third_party/** -tricium
src/.gn (new file, 162 lines)
@@ -0,0 +1,162 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.

import("//build/dotfile_settings.gni")
import("//third_party/angle/dotfile_settings.gni")

# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"

# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"

# These arguments override the default values for items in a declare_args
# block. "gn args" in turn can override these.
#
# In general the value for a build arg in the declare_args block should be the
# default. In some cases, a DEPS-ed in project will want different defaults for
# being built as part of Chrome vs. being built standalone. In this case, the
# Chrome defaults should go here. There should be no overrides here for
# values declared in the main Chrome repository.
#
# Important note for defining defaults: This file is executed before the
# BUILDCONFIG.gn file. That file sets up the global variables like "is_ios".
# This means that the default_args can not depend on the platform,
# architecture, or other build parameters. If you really need that, the other
# repo should define a flag that toggles on a behavior that implements the
# additional logic required by Chrome to set the variables.
default_args = {
  # TODO(brettw) bug 684096: Chrome on iOS does not build v8, so "gn gen"
  # prints a warning that "Build argument has no effect". When adding a v8
  # variable, it also needs to be added to src/ios/BUILD.gn (and removed from
  # both locations when it is removed).

  v8_extra_library_files = []
  v8_experimental_extra_library_files = []
  v8_enable_gdbjit = false
  v8_imminent_deprecation_warnings = false

  # Don't include webrtc's builtin task queue implementation.
  rtc_link_task_queue_impl = false

  # Don't include the iLBC audio codec.
  # TODO(bugs.webrtc.org/8396): Once WebRTC gets rid of its internal
  # deps on codecs, we can remove this.
  rtc_include_ilbc = false

  # Changes some setup for the Crashpad build to set them to build against
  # Chromium's zlib, base, etc.
  crashpad_dependencies = "chromium"

  # Override ANGLE's Vulkan dependencies.
  angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
  angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
  angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
  angle_vulkan_validation_layers_dir =
      "//third_party/vulkan-deps/vulkan-validation-layers/src"

  devtools_visibility = [ "*" ]
}

# These are the targets to skip header checking by default. The files in
# targets matching these patterns (see "gn help label_pattern" for format)
# will not have their includes checked for proper dependencies when you run
# either "gn check" or "gn gen --check".
no_check_targets = [
  "//extensions:*",  # 28 errors
  "//headless:*",  # 107 errors

  "//third_party/libwebp:*",  # 7 errors, https://crbug.com/800762

  # //v8, https://crbug.com/v8/7330
  "//v8/src/inspector:inspector",  # 20 errors
  "//v8/test/cctest:cctest_sources",  # 2 errors
  "//v8:cppgc_base",  # 1 error
  "//v8:v8_internal_headers",  # 11 errors
  "//v8:v8_libplatform",  # 2 errors
]

# This is the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged.
#
# PLEASE READ
#
# You should almost never need to add new exec_script calls. exec_script is
# slow, especially on Windows, and can cause confusing effects. Although
# individually each call isn't slow or necessarily very confusing, at the scale
# of our repo things get out of hand quickly. By strongly pushing back on all
# additions, we keep the build fast and clean. If you think you need to add a
# new call, please consider:
#
# - Do not use a script to check for the existence of a file or directory to
#   enable a different mode. Instead, use GN build args to enable or disable
#   functionality and set options. An example is checking for a file in the
#   src-internal repo to see if the corresponding src-internal feature should
#   be enabled. There are several things that can go wrong with this:
#
#    - It's mysterious what causes some things to happen. Although in many
#      cases such behavior can be conveniently automatic, GN optimizes for
#      explicit and obvious behavior so people can more easily diagnose
#      problems.
#
#    - The user can't enable a mode for one build and not another. With GN
#      build args, the user can choose the exact configuration of multiple
#      builds using one checkout. But by implicitly basing flags on the state
#      of the checkout, this functionality is broken.
#
#    - It's easy to get stale files. If for example the user edits the gclient
#      to stop checking out src-internal (or any other optional thing), it's
#      easy to end up with stale files still mysteriously triggering build
#      conditions that are no longer appropriate (yes, this happens in real
#      life).
#
# - Do not use a script to iterate files in a directory (glob):
#
#    - This has the same "stale file" problem as the above discussion. Various
#      operations can leave untracked files in the source tree which can cause
#      surprising effects.
#
#    - It becomes impossible to use "git grep" to find where a certain file is
#      referenced. This operation is very common and people really do get
#      confused when things aren't listed.
#
#    - It's easy to screw up. One common case is a build-time script that packs
#      up a directory. The author notices that the script isn't re-run when the
#      directory is updated, so adds a glob so all the files are listed as
#      inputs. This seems to work great... until a file is deleted. When a
#      file is deleted, all the inputs the glob lists will still be up to date
#      and no command-lines will have been changed. The action will not be
#      re-run and the build will be broken. It is possible to get this correct
#      using glob, and it's possible to mess it up without glob, but globs make
#      this situation much easier to create. If the build always lists the
#      files and passes them to a script, it will always be correct.

exec_script_whitelist =
    build_dotfile_settings.exec_script_whitelist +
    angle_dotfile_settings.exec_script_whitelist +
    [
      # Whitelist entries for //build should go into
      # //build/dotfile_settings.gni instead, so that they can be shared
      # with other repos. The entries in this list should be only for files
      # in the Chromium repo outside of //build.
      "//build_overrides/build.gni",

      "//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",

      # TODO(dgn): Layer violation but breaks the build otherwise, see
      # https://crbug.com/474506.
      "//clank/java/BUILD.gn",
      "//clank/native/BUILD.gn",

      "//google_apis/BUILD.gn",
      "//printing/BUILD.gn",

      "//remoting/host/installer/linux/BUILD.gn",
      "//remoting/remoting_version.gni",
      "//remoting/host/installer/win/generate_clsids.gni",

      "//tools/grit/grit_rule.gni",
      "//tools/gritsettings/BUILD.gn",
    ]
src/AUTHORS (new file, 1323 lines)
File diff suppressed because it is too large.
src/BUILD.gn (new file, 1639 lines)
File diff suppressed because it is too large.
src/LICENSE (new file, 27 lines)
@@ -0,0 +1,27 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
src/base/BUILD.gn (new file, 4403 lines)
File diff suppressed because it is too large.
src/base/DEPS (new file, 22 lines)
@@ -0,0 +1,22 @@
include_rules = [
  "+third_party/ashmem",
  "+third_party/apple_apsl",
  "+third_party/boringssl/src/include",
  "+third_party/ced",
  "+third_party/libunwindstack/src/libunwindstack/include",
  "+third_party/lss",
  "+third_party/modp_b64",
  "+third_party/perfetto/include",
  "+third_party/perfetto/protos/perfetto",
  "+third_party/tcmalloc",

  # These are implicitly brought in from the root, and we don't want them.
  "-ipc",
  "-url",

  # ICU dependencies must be separate from the rest of base.
  "-i18n",

  # //base/util can use //base but not vice versa.
  "-util",
]
src/base/DIR_METADATA (new file, 3 lines)
@@ -0,0 +1,3 @@
monorail {
  component: "Internals>Core"
}
src/base/OWNERS (new file, 41 lines)
@@ -0,0 +1,41 @@
# See //base/README.md to find qualification for being an owner.

set noparent
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners
ajwong@chromium.org
danakj@chromium.org
dcheng@chromium.org
fdoray@chromium.org
gab@chromium.org
jdoerrie@chromium.org
kylechar@chromium.org
mark@chromium.org
thakis@chromium.org
thestig@chromium.org
wez@chromium.org
# NOTE: keep this in sync with lsc-owners-override@chromium.org owners

# per-file rules:
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file BUILD.gn=*

# For Android-specific changes:
per-file *android*=file://base/android/OWNERS
per-file BUILD.gn=file://base/android/OWNERS

# For Fuchsia-specific changes:
per-file *_fuchsia*=file://build/fuchsia/OWNERS

# For Windows-specific changes:
per-file *_win*=file://base/win/OWNERS

per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org

# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS

per-file safe_numerics_unittest.cc=file://base/numerics/OWNERS
src/base/PRESUBMIT.py (new file, 144 lines)
@@ -0,0 +1,144 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Chromium presubmit script for src/base.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""


USE_PYTHON3 = True


def _CheckNoInterfacesInBase(input_api, output_api):
  """Checks to make sure no files in libbase.a have |@interface|."""
  pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
  files = []
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if (f.LocalPath().startswith('base/') and
        not "/ios/" in f.LocalPath() and
        not "/test/" in f.LocalPath() and
        not f.LocalPath().endswith('.java') and
        not f.LocalPath().endswith('_unittest.mm') and
        not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
      contents = input_api.ReadFile(f)
      if pattern.search(contents):
        files.append(f)

  if len(files):
    return [ output_api.PresubmitError(
        'Objective-C interfaces or categories are forbidden in libbase. ' +
        'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
        'browse_thread/thread/efb28c10435987fd',
        files) ]
  return []


def _FindLocations(input_api, search_regexes, files_to_check, files_to_skip):
  """Returns locations matching one of the search_regexes."""
  def FilterFile(affected_file):
    return input_api.FilterSourceFile(
        affected_file,
        files_to_check=files_to_check,
        files_to_skip=files_to_skip)

  no_presubmit = r"// no-presubmit-check"
  locations = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      for search_regex in search_regexes:
        if (input_api.re.search(search_regex, line) and
            not input_api.re.search(no_presubmit, line)):
          locations.append("  %s:%d" % (f.LocalPath(), line_num))
          break
  return locations


def _CheckNoTraceEventInclude(input_api, output_api):
  """Verify that //base includes base_tracing.h instead of trace event headers.

  Checks that files outside trace event implementation include the
  base_tracing.h header instead of specific trace event implementation headers
  to maintain compatibility with the gn flag "enable_base_tracing = false".
  """
  discouraged_includes = [
      r'^#include "base/trace_event/(?!base_tracing\.h|base_tracing_forward\.h)',
      r'^#include "third_party/perfetto/include/',
  ]

  files_to_check = [
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
  ]

  locations = _FindLocations(input_api, discouraged_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitError(
        'Base code should include "base/trace_event/base_tracing.h" instead\n' +
        'of trace_event implementation headers. If you need to include an\n' +
        'implementation header, verify that "gn check" and base_unittests\n' +
        'still pass with gn arg "enable_base_tracing = false" and add\n' +
        '"// no-presubmit-check" after the include.\n' +
        '\n'.join(locations)) ]
  return []


def _WarnPbzeroIncludes(input_api, output_api):
  """Warn to check enable_base_tracing=false when including a pbzero header.

  Emits a warning when including a perfetto pbzero header, encouraging the
  user to verify that //base still builds with enable_base_tracing=false.
  """
  warn_includes = [
      r'^#include "third_party/perfetto/protos/',
      r'^#include "base/tracing/protos/',
  ]

  files_to_check = [
      r".*\.(h|cc|mm)$",
  ]
  files_to_skip = [
      r".*[\\/]test[\\/].*",
      r".*[\\/]trace_event[\\/].*",
      r".*[\\/]tracing[\\/].*",
  ]

  locations = _FindLocations(input_api, warn_includes, files_to_check,
                             files_to_skip)
  if locations:
    return [ output_api.PresubmitPromptWarning(
        'Please verify that "gn check" and base_unittests still pass with\n' +
        'gn arg "enable_base_tracing = false" when adding typed trace\n' +
        'events to //base. You can use "#if BUILDFLAG(ENABLE_BASE_TRACING)"\n' +
        'to exclude pbzero headers and anything not supported by\n' +
        '//base/trace_event/trace_event_stub.h.\n' +
        '\n'.join(locations)) ]
  return []


def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  results.extend(_CheckNoInterfacesInBase(input_api, output_api))
  results.extend(_CheckNoTraceEventInclude(input_api, output_api))
  results.extend(_WarnPbzeroIncludes(input_api, output_api))
  return results


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results


def CheckChangeOnCommit(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  return results
src/base/README.md (new file, 81 lines)
@@ -0,0 +1,81 @@
# What is this
Contains a written down set of principles and other information on //base.
Please add to it!

## About //base:

Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.

The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.

Adding a new logging macro `DPVELOG_NE` is not more clear than just
writing the stuff you want to log in a regular logging statement, even
if it makes your calling code longer. Just add it to your own code.

If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.

base is written for the Chromium project and is not intended to be used
outside it. Using base outside of src.git is explicitly not supported,
and base makes no guarantees about API (or even ABI) stability (like all
other code in Chromium). New code that depends on base/ must be in
src.git. Code that's not in src.git but pulled in through DEPS (for
example, v8) cannot use base.

## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex C++ stuff
* inclination to always ask why and understand everything (including external
  interactions like win32) rather than just hoping the author did it right
* mentorship/experience
* demonstrated good judgement (esp with regards to public APIs) over a length
  of time

Owners are added when a contributor has shown the above qualifications and
when they express interest. There isn't an upper bound on the number of OWNERS.

## Design and naming
* Be sure to use the base namespace.
* STL-like constructs should adhere as closely to STL as possible. Functions
  and behaviors not present in STL should only be added when they are related
  to the specific data structure implemented by the container.
* For STL-like constructs our policy is that they should use STL-like naming
  even when it may conflict with the style guide. So functions and class names
  should be lower case with underscores. Non-STL-like classes and functions
  should use Google naming (see the sketch below).
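A minimal, hypothetical sketch of this naming split (both classes below are
invented for illustration; they are not real //base types):

```cpp
#include <stddef.h>

#include <vector>

namespace base {

// STL-like container: lower_case_with_underscores names, as std:: would use.
template <typename T>
class ring_buffer {
 public:
  bool empty() const { return buffer_.empty(); }
  size_t size() const { return buffer_.size(); }
  void push_back(const T& value) { buffer_.push_back(value); }

 private:
  std::vector<T> buffer_;  // Illustrative backing store only.
};

// Not STL-like, so it follows regular Google naming: CamelCase methods.
class FileWatcher {
 public:
  bool Watch(const char* path) { return path != nullptr; }  // Stub body.
};

}  // namespace base
```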
## Performance testing

Since the primitives provided by //base are used very widely, it is important to
ensure they scale to the necessary workloads and perform well under all
supported platforms. The `base_perftests` target is a suite of
synthetic microbenchmarks that measure performance in various scenarios:

* BasicPostTaskPerfTest: Exercises MessageLoopTaskRunner's multi-threaded
  queue in isolation.
* ConditionVariablePerfTest: Measures thread switching cost of condition
  variables.
* IntegratedPostTaskPerfTest: Exercises the full MessageLoop/RunLoop
  machinery.
* JSONPerfTest: Tests JSONWriter and JSONReader performance.
* MessageLoopPerfTest: Measures the speed of task posting in various
  configurations.
* ObserverListPerfTest: Exercises adding, removing and signalling observers.
* PthreadEventPerfTest: Establishes the baseline thread switching cost using
  pthreads.
* ScheduleWorkTest: Measures the overhead of MessagePump::ScheduleWork.
* SequenceManagerPerfTest: Benchmarks SequenceManager scheduling with various
  underlying task runners.
* TaskObserverPerfTest: Measures the incremental cost of adding task
  observers.
* TaskPerfTest: Checks the cost of posting tasks between threads.
* WaitableEvent{Thread,}PerfTest: Measures waitable events in single and
  multithreaded scenarios.

Regressions in these benchmarks can generally be caused by 1) operating system
changes, 2) compiler version or flag changes or 3) changes in //base code
itself.
src/base/SECURITY_OWNERS (new file, 13 lines)
@@ -0,0 +1,13 @@
# Changes to code that runs at high privilege and which has a high risk of
# memory corruption, such as parsers for complex inputs, require a security
# review to avoid introducing sandbox escapes.
#
# Although this file is in base/, it may apply to more than just base; OWNERS
# files outside of base may also include this file.
#
# Security team: If you are uncomfortable reviewing a particular bit of code
# yourself, don't hesitate to seek help from another security team member!
# Nobody knows everything, and the only way to learn is from experience.
dcheng@chromium.org
rsesek@chromium.org
tsepez@chromium.org
src/base/allocator/BUILD.gn (new file, 360 lines)
@@ -0,0 +1,360 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")

declare_args() {
  # Provide a way to force disable debugallocation in Debug builds,
  # e.g. for profiling (it's more rare to profile Debug builds,
  # but people sometimes need to do that).
  enable_debugallocation = is_debug

  # Provide a way to build tcmalloc with a low memory footprint.
  use_tcmalloc_small_but_slow = false
}

# This "allocator" meta-target will forward to the default allocator according
# to the build settings.
group("allocator") {
  public_deps = []
  deps = []

  if (use_allocator == "tcmalloc") {
    deps += [ ":tcmalloc" ]
  }
}

config("tcmalloc_flags") {
  defines = [
    "TCMALLOC_USE_DOUBLYLINKED_FREELIST",
    "TCMALLOC_DISABLE_HUGE_ALLOCATIONS",
  ]
  if (enable_debugallocation) {
    defines += [
      # Use debugallocation for Debug builds to catch problems early
      # and cleanly, http://crbug.com/30715 .
      "TCMALLOC_FOR_DEBUGALLOCATION",
    ]
  }
  if (use_allocator_shim) {
    defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
  }
  if (use_tcmalloc_small_but_slow) {
    defines += [ "TCMALLOC_SMALL_BUT_SLOW" ]
  }
  if (is_clang) {
    cflags = [
      # tcmalloc initializes some fields in the wrong order.
      "-Wno-reorder",

      # tcmalloc contains some unused local template specializations.
      "-Wno-unused-function",

      # tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
      "-Wno-unused-local-typedefs",

      # for magic2_ in debugallocation.cc (only built in Debug builds) typedefs.
      "-Wno-unused-private-field",
    ]
  } else {
    cflags = []
  }

  if (is_linux || is_chromeos || is_android) {
    # We enable all warnings by default, but upstream disables a few.
    # Keep "-Wno-*" flags in sync with upstream by comparing against:
    # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
    cflags += [
      "-Wno-sign-compare",
      "-Wno-unused-result",
    ]
  }
}

if (use_allocator == "tcmalloc") {
  # tcmalloc currently won't compile on Android.
  source_set("tcmalloc") {
    tcmalloc_dir = "//third_party/tcmalloc/chromium"

    # Don't check tcmalloc's includes. These files include various files like
    # base/foo.h and they actually refer to tcmalloc's forked copy of base
    # rather than the regular one, which confuses the header checker.
    check_includes = false

    sources = [
      # Generated for our configuration from tcmalloc's build
      # and checked in.
      "$tcmalloc_dir/src/config.h",

      # tcmalloc native and forked files.
      "$tcmalloc_dir/src/base/abort.cc",
      "$tcmalloc_dir/src/base/abort.h",
      "$tcmalloc_dir/src/base/arm_instruction_set_select.h",
      "$tcmalloc_dir/src/base/atomicops-internals-arm-generic.h",
      "$tcmalloc_dir/src/base/atomicops-internals-arm-v6plus.h",
      "$tcmalloc_dir/src/base/atomicops-internals-linuxppc.h",
      "$tcmalloc_dir/src/base/atomicops-internals-macosx.h",
      "$tcmalloc_dir/src/base/atomicops-internals-windows.h",
      "$tcmalloc_dir/src/base/atomicops-internals-x86.cc",
      "$tcmalloc_dir/src/base/atomicops-internals-x86.h",
      "$tcmalloc_dir/src/base/atomicops.h",
      "$tcmalloc_dir/src/base/commandlineflags.h",

      # We don't list dynamic_annotations.c since its copy is already
      # present in the dynamic_annotations target.
      "$tcmalloc_dir/src/base/elf_mem_image.cc",
      "$tcmalloc_dir/src/base/elf_mem_image.h",
      "$tcmalloc_dir/src/base/linuxthreads.cc",
      "$tcmalloc_dir/src/base/linuxthreads.h",
      "$tcmalloc_dir/src/base/logging.cc",
      "$tcmalloc_dir/src/base/logging.h",
      "$tcmalloc_dir/src/base/low_level_alloc.cc",
      "$tcmalloc_dir/src/base/low_level_alloc.h",
      "$tcmalloc_dir/src/base/spinlock.cc",
      "$tcmalloc_dir/src/base/spinlock.h",
      "$tcmalloc_dir/src/base/spinlock_internal.cc",
      "$tcmalloc_dir/src/base/spinlock_internal.h",
      "$tcmalloc_dir/src/base/sysinfo.cc",
      "$tcmalloc_dir/src/base/sysinfo.h",
      "$tcmalloc_dir/src/base/vdso_support.cc",
      "$tcmalloc_dir/src/base/vdso_support.h",
      "$tcmalloc_dir/src/central_freelist.cc",
      "$tcmalloc_dir/src/central_freelist.h",
      "$tcmalloc_dir/src/common.cc",
      "$tcmalloc_dir/src/common.h",

      # #included by debugallocation_shim.cc
      #"$tcmalloc_dir/src/debugallocation.cc",
      "$tcmalloc_dir/src/fake_stacktrace_scope.cc",
      "$tcmalloc_dir/src/free_list.cc",
      "$tcmalloc_dir/src/free_list.h",
      "$tcmalloc_dir/src/gperftools/heap-profiler.h",
      "$tcmalloc_dir/src/gperftools/malloc_extension.h",
      "$tcmalloc_dir/src/gperftools/malloc_hook.h",
      "$tcmalloc_dir/src/gperftools/stacktrace.h",
      "$tcmalloc_dir/src/internal_logging.cc",
      "$tcmalloc_dir/src/internal_logging.h",
      "$tcmalloc_dir/src/linked_list.h",
      "$tcmalloc_dir/src/malloc_extension.cc",
      "$tcmalloc_dir/src/malloc_hook-inl.h",
      "$tcmalloc_dir/src/malloc_hook.cc",
      "$tcmalloc_dir/src/maybe_emergency_malloc.h",
      "$tcmalloc_dir/src/maybe_threads.cc",
      "$tcmalloc_dir/src/maybe_threads.h",
      "$tcmalloc_dir/src/page_heap.cc",
      "$tcmalloc_dir/src/page_heap.h",
      "$tcmalloc_dir/src/raw_printer.cc",
      "$tcmalloc_dir/src/raw_printer.h",
      "$tcmalloc_dir/src/sampler.cc",
      "$tcmalloc_dir/src/sampler.h",
      "$tcmalloc_dir/src/span.cc",
      "$tcmalloc_dir/src/span.h",
      "$tcmalloc_dir/src/stack_trace_table.cc",
      "$tcmalloc_dir/src/stack_trace_table.h",
      "$tcmalloc_dir/src/stacktrace.cc",
      "$tcmalloc_dir/src/static_vars.cc",
      "$tcmalloc_dir/src/static_vars.h",
      "$tcmalloc_dir/src/symbolize.cc",
      "$tcmalloc_dir/src/symbolize.h",
      "$tcmalloc_dir/src/system-alloc.cc",
      "$tcmalloc_dir/src/system-alloc.h",

      # #included by debugallocation_shim.cc
      #"$tcmalloc_dir/src/tcmalloc.cc",
      #"$tcmalloc_dir/src/tcmalloc.h",
      "$tcmalloc_dir/src/thread_cache.cc",
      "$tcmalloc_dir/src/thread_cache.h",
      "$tcmalloc_dir/src/windows/port.cc",
      "$tcmalloc_dir/src/windows/port.h",
      "debugallocation_shim.cc",

      # These are both #included by allocator_shim for maximal linking.
      #"generic_allocators.cc",
      #"win_allocator.cc",
    ]

    if (is_android) {
      sources += [ "$tcmalloc_dir/src/config_android.h" ]
    }

    if (is_linux || is_chromeos) {
      sources += [ "$tcmalloc_dir/src/config_linux.h" ]
    }

    if (is_win) {
      sources += [ "$tcmalloc_dir/src/config_win.h" ]
    }

    # Not included on mips64el.
    if (current_cpu == "mips64el") {
      sources -= [
        "$tcmalloc_dir/src/base/linuxthreads.cc",
        "$tcmalloc_dir/src/base/linuxthreads.h",
      ]
    }

    # Disable the heap checker in tcmalloc.
    defines = [ "NO_HEAP_CHECK" ]

    include_dirs = [
      ".",
      "$tcmalloc_dir/src/base",
      "$tcmalloc_dir/src",
    ]

    configs -= [ "//build/config/compiler:chromium_code" ]
    configs += [
      "//build/config/compiler:no_chromium_code",
      ":tcmalloc_flags",
    ]

    # Thumb mode disabled due to bug in clang integrated assembler
    # TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
    configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
    configs += [ "//build/config/compiler:compiler_arm" ]

    # TODO(crbug.com/633719) Make tcmalloc work with AFDO on GCC if possible.
    if (!is_clang) {
      configs -= [ "//build/config/compiler:afdo" ]
    }

    deps = [
      ":buildflags",
      "//build:chromeos_buildflags",
    ]

    if (enable_profiling) {
      sources += [
        "$tcmalloc_dir/src/base/thread_lister.c",
        "$tcmalloc_dir/src/base/thread_lister.h",
        "$tcmalloc_dir/src/heap-profile-table.cc",
        "$tcmalloc_dir/src/heap-profile-table.h",
        "$tcmalloc_dir/src/heap-profiler.cc",
        "$tcmalloc_dir/src/memory_region_map.cc",
        "$tcmalloc_dir/src/memory_region_map.h",
        "$tcmalloc_dir/src/profile-handler.cc",
        "$tcmalloc_dir/src/profile-handler.h",
        "$tcmalloc_dir/src/profiledata.cc",
        "$tcmalloc_dir/src/profiledata.h",
        "$tcmalloc_dir/src/profiler.cc",
      ]
      defines += [ "ENABLE_PROFILING=1" ]
    }

    if (is_linux || is_chromeos || is_android) {
      sources -= [
        "$tcmalloc_dir/src/system-alloc.h",
        "$tcmalloc_dir/src/windows/port.cc",
        "$tcmalloc_dir/src/windows/port.h",
      ]

      # Compiling tcmalloc with -fvisibility=default is only necessary when
      # not using the allocator shim, which provides the correct visibility
      # annotations for those symbols which need to be exported (see
      # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
      # //base/allocator/allocator_shim_internals.h for the definition of
      # SHIM_ALWAYS_EXPORT).
      if (!use_allocator_shim) {
        configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
        configs += [ "//build/config/gcc:symbol_visibility_default" ]
      }

      ldflags = [
        # Don't let linker rip this symbol out, otherwise the heap & CPU
        # profilers will not initialize properly on startup.
        "-Wl,-uIsHeapProfilerRunning,-uProfilerStart",

        # Do the same for heap leak checker.
        "-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi",
        "-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl",
        "-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv",
      ]
    }

    # Make sure the allocation library is optimized as much as possible when
    # we're in release mode.
    if (!is_debug) {
      configs -= [ "//build/config/compiler:default_optimization" ]
      configs += [ "//build/config/compiler:optimize_max" ]
    }

    deps += [ "//base/third_party/dynamic_annotations" ]
  }
}  # use_allocator == "tcmalloc"

buildflag_header("buildflags") {
  header = "buildflags.h"
  _use_partition_alloc_as_malloc = use_allocator == "partition"
  _use_tcmalloc_as_malloc = use_allocator == "tcmalloc"
  assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
         "Partition alloc requires the allocator shim")

  # BackupRefPtr (BRP) build flags.
  _use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
  _use_backup_ref_ptr_fake =
      use_backup_ref_ptr_fake && use_partition_alloc && !is_nacl
  _enable_backup_ref_ptr_in_renderer_process =
      enable_backup_ref_ptr_in_renderer_process && _use_backup_ref_ptr
  _put_ref_count_in_previous_slot =
      put_ref_count_in_previous_slot && _use_backup_ref_ptr
  _never_remove_from_brp_pool_from_blocklist =
      never_remove_from_brp_pool_blocklist && _use_backup_ref_ptr
  _enable_backup_ref_ptr_slow_checks =
      enable_backup_ref_ptr_slow_checks && _use_backup_ref_ptr

  # AlignedAlloc relies on natural alignment offered by the allocator (see the
  # comment inside PartitionRoot::AlignedAllocFlags). Any extras in front of
  # the allocation will mess up that alignment. Such extras are used when
  # BackupRefPtr is on (unless the "previous slot" variant is used). In this
  # case, we have to create a separate, smaller partition, dedicated to handle
  # only aligned allocations, where those extras are disabled.
  _use_dedicated_partition_for_aligned_alloc_upon_enabling_brp =
      _use_backup_ref_ptr && !_put_ref_count_in_previous_slot

  flags = [
    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
    "USE_TCMALLOC=$_use_tcmalloc_as_malloc",
    "USE_PARTITION_ALLOC=$use_partition_alloc",
    "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",

    "USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
    "USE_BACKUP_REF_PTR_FAKE=$_use_backup_ref_ptr_fake",
    "ENABLE_BACKUP_REF_PTR_IN_RENDERER_PROCESS=$_enable_backup_ref_ptr_in_renderer_process",
    "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
    "NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST=$_never_remove_from_brp_pool_from_blocklist",
    "USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP=$_use_dedicated_partition_for_aligned_alloc_upon_enabling_brp",
  ]
}

# Used to shim malloc symbols on Android. See //base/allocator/README.md.
config("wrap_malloc_symbols") {
  ldflags = [
    "-Wl,-wrap,calloc",
    "-Wl,-wrap,free",
    "-Wl,-wrap,malloc",
    "-Wl,-wrap,memalign",
    "-Wl,-wrap,posix_memalign",
    "-Wl,-wrap,pvalloc",
    "-Wl,-wrap,realloc",
    "-Wl,-wrap,valloc",

    # <stdlib.h> functions
    "-Wl,-wrap,realpath",

    # <string.h> functions
    "-Wl,-wrap,strdup",
    "-Wl,-wrap,strndup",

    # <unistd.h> functions
    "-Wl,-wrap,getcwd",

    # <stdio.h> functions
    "-Wl,-wrap,asprintf",
    "-Wl,-wrap,vasprintf",
  ]
}
src/base/allocator/DIR_METADATA (new file, 3 lines)
@@ -0,0 +1,3 @@
monorail {
  component: "Internals"
}
src/base/allocator/OWNERS (new file, 7 lines)
@@ -0,0 +1,7 @@
lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org

per-file allocator.gni=bartekn@chromium.org
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org
per-file BUILD.gn=bartekn@chromium.org
src/base/allocator/README.md (new file, 188 lines)
@@ -0,0 +1,188 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.

Bear in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
 - Large parts of the renderer (Blink) use two home-brewed allocators,
   PartitionAlloc and BlinkGC (Oilpan).
 - Some subsystems, such as the V8 JavaScript engine, handle memory management
   autonomously.
 - Various parts of the codebase use abstractions such as `SharedMemory` or
   `DiscardableMemory` which, similarly to the above, have their own page-level
   memory management.

Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra-hooks which services calls to malloc/new. The relevant
build-time flags involved are `use_allocator` and `use_allocator_shim`.

The default choices are as follows:

**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.

**Linux Desktop / CrOS**
`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
to fall back to the system (Glibc) symbols.

**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).

**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.

In addition, when building for `asan` / `msan` both the allocator and the shim
layer are disabled.

Layering and build deps
-----------------------
The `allocator` target provides both the source files for tcmalloc (where
applicable) and the linker flags required for the Windows shim layer.
The `base` target is (almost) the only one depending on `allocator`. No other
targets should depend on it, with the exception of the very few executables /
dynamic libraries that don't depend, either directly or indirectly, on `base`
within the scope of a linker unit.

More importantly, **no other place outside of `/base` should depend on the
specific allocator** (e.g., directly include `third_party/tcmalloc`).
If such a functional dependency is required, it should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`).

**Why does `base` depend on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
inconsistent mess.
See the [allocator cleanup doc][url-allocator-cleanup] for more context.

Linker unit targets (executables and shared libraries) that depend in some way
on `base` (most of the targets in the codebase) automatically get the correct
set of linker flags to pull in tcmalloc or the Windows shim layer.


Source code
-----------
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.

The tcmalloc library originates outside of Chromium and exists in
`../../third_party/tcmalloc` (currently, the actual location is defined in the
allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
track Chromium-specific changes independently from upstream changes.

The general intent is to push local changes upstream so that over
time we no longer need any forked files.


Unified allocator shim
----------------------
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
a central place.

 - Full documentation: [Allocator shim design doc][url-allocator-shim].
 - Current state: Available and enabled by default on Android, CrOS, Linux,
   Mac OS and Windows.
 - Tracking bug: [https://crbug.com/550886][crbug.com/550886].
 - Build-time flag: `use_allocator_shim`.

**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+    +-----------------------+    +----------------+
|     malloc & friends    | -> |     shim layer        | -> |   Routing to   |
|    symbols definition   |    |     implementation    |    |    allocator   |
+-------------------------+    +-----------------------+    +----------------+
| - libc symbols (malloc, |    | - Security checks     |    | - tcmalloc     |
|   calloc, free, ...)    |    | - Chain of dispatchers|    | - glibc        |
| - C++ symbols (operator |    |   that can intercept  |    | - Android      |
|   new, delete, ...)     |    |   and override        |    |   bionic       |
| - glibc weak symbols    |    |   allocations         |    | - WinHeap      |
|   (__libc_malloc, ...)  |    +-----------------------+    +----------------+
+-------------------------+
```

**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
This is taken care of by the headers in `allocator_shim_override_*`.

*On Windows*: Windows' UCRT (Universal C Runtime) exports weak symbols that we
can override in `allocator_shim_override_ucr_symbols_win.h`.

*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
`operator delete` and friends).
This enables proper interposition of malloc symbols referenced by the main
executable and any third party libraries. Symbol resolution on Linux is a
breadth-first search that starts from the root link unit, that is the
executable (see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats
Specification).
Additionally, when tcmalloc is the default allocator, some extra glibc symbols
are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
reasons explained in that file.
The Linux/CrOS shim was introduced by
[crrev.com/1675143004](https://crrev.com/1675143004).

*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
possible. This is because Android processes are `fork()`-ed from the Android
zygote, which pre-loads libc.so and only later native code gets loaded via
`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
scope).
In this case, the approach instead is to wrap symbol resolution at link time
(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
The use of this wrapping flag causes:
 - All references to allocator symbols in the Chrome codebase to be rewritten as
   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
   route allocator calls inside the shim layer.
 - References to the original `malloc` symbols (typically defined by the
   system's libc.so) to remain accessible via the special `__real_malloc` and
   friends symbols (which will be relocated, at load time, against `malloc`).

In summary, this approach is transparent to the dynamic loader, which still sees
undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
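As a rough sketch of what this wrapping looks like from the code side (this is
not the actual Chromium shim code; the counter is invented for illustration,
and the snippet only links when built with `-Wl,-wrap,malloc`):

```cpp
#include <stddef.h>

#include <atomic>

namespace {
std::atomic<size_t> g_malloc_calls{0};  // Example-only bookkeeping.
}  // namespace

extern "C" {
// Resolved by the linker to the original libc malloc.
void* __real_malloc(size_t size);

// With -Wl,-wrap,malloc the linker rewrites every reference to malloc in the
// wrapped link units into a reference to __wrap_malloc.
void* __wrap_malloc(size_t size) {
  g_malloc_calls.fetch_add(1, std::memory_order_relaxed);
  return __real_malloc(size);  // Forward to the original allocator.
}
}  // extern "C"
```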
**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
 - A singly linked list of dispatchers (structs with function pointers to
   `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
   (using the `InsertAllocatorDispatch` API). They can intercept and override
   allocator calls, as sketched below.
 - The security checks (suicide on malloc-failure via `std::new_handler`,
   etc.). This happens inside `allocator_shim.cc`.
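A self-contained sketch of such a dispatcher chain (simplified; the struct
below is modeled on, but does not exactly match, the shim's
`AllocatorDispatch` definition in `allocator_shim.h`):

```cpp
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct Dispatch {
  void* (*alloc)(const Dispatch* self, size_t size);
  void (*free)(const Dispatch* self, void* ptr);
  const Dispatch* next;  // Next dispatcher; the chain ends at the allocator.
};

// Terminal dispatcher: routes to the actual allocator (here, libc malloc).
void* TerminalAlloc(const Dispatch*, size_t size) { return malloc(size); }
void TerminalFree(const Dispatch*, void* ptr) { free(ptr); }
const Dispatch g_terminal = {&TerminalAlloc, &TerminalFree, nullptr};

// An interposed dispatcher that logs, then forwards to the next in the chain.
void* LoggingAlloc(const Dispatch* self, size_t size) {
  fprintf(stderr, "alloc(%zu)\n", size);
  return self->next->alloc(self->next, size);
}
void LoggingFree(const Dispatch* self, void* ptr) {
  self->next->free(self->next, ptr);
}
const Dispatch g_logging = {&LoggingAlloc, &LoggingFree, &g_terminal};

int main() {
  void* p = g_logging.alloc(&g_logging, 16);  // Logs, then mallocs.
  g_logging.free(&g_logging, p);
  return 0;
}
```

In the real shim the terminal element of the chain is fixed at build time, as
described in the next section.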
**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
headers in `allocator_shim_default_dispatch_to_*` files.


Related links
-------------
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)

[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
src/base/allocator/allocator.gni (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
# Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//build/config/chromecast_build.gni")
import("//build/config/sanitizers/sanitizers.gni")

# Sanitizers replace the allocator, don't use our own.
_is_using_sanitizers = is_asan || is_hwasan || is_lsan || is_tsan || is_msan

# - Component build support is disabled on all platforms. It is known to cause
#   issues on some (e.g. Windows with shims, Android with non-universal symbol
#   wrapping), and has not been validated on others.
# - Windows: debug CRT is not compatible, see below.
# - Chromecast on Android: causes issues with crash reporting, see b/178423326.
_disable_partition_alloc =
    is_component_build || (is_win && is_debug) || (is_android && is_chromecast)
_is_partition_alloc_platform = is_android || is_win || is_linux || is_chromeos

# The debug CRT on Windows has some debug features that are incompatible with
# the shim. NaCl in particular does seem to link some binaries statically
# against the debug CRT with "is_nacl=false".
if ((is_linux || is_chromeos || is_android || is_apple ||
     (is_win && !is_component_build && !is_debug)) && !_is_using_sanitizers) {
  _default_use_allocator_shim = true
} else {
  _default_use_allocator_shim = false
}

if (_default_use_allocator_shim && _is_partition_alloc_platform &&
    !_disable_partition_alloc) {
  _default_allocator = "partition"
} else if (is_android || is_apple || _is_using_sanitizers || is_win ||
           is_fuchsia || ((is_linux || is_chromeos) && target_cpu == "arm64") ||
           (is_cast_audio_only && target_cpu == "arm")) {
  # Temporarily disable tcmalloc on arm64 linux to get rid of compilation
  # errors.
  _default_allocator = "none"
} else {
  _default_allocator = "tcmalloc"
}

declare_args() {
  # Memory allocator to use. Set to "none" to use the default allocator.
  use_allocator = _default_allocator

  # Causes all the allocations to be routed via allocator_shim.cc.
  use_allocator_shim = _default_use_allocator_shim

  # Whether PartitionAlloc should be available for use or not.
  # true makes PartitionAlloc linked to the executable or shared library and
  # makes it available for use, but it doesn't mean that the default allocator
  # is PartitionAlloc. PartitionAlloc may or may not be the default allocator.
  #
  # |use_allocator = "partition"| makes PartitionAlloc the default allocator
  # but it's effective only when |use_partition_alloc = true|.
  #
  # TODO(lizeb, yukishiino): Determine if |use_partition_alloc| is necessary or
  # not, and redesign or remove the flag accordingly. We may want to assert a
  # possible conflict between |use_allocator = "partition"| and
  # |use_partition_alloc = true| rather than prioritizing use_partition_alloc.
  use_partition_alloc = !is_ios  # Never use PartitionAlloc on iOS.

  # Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
  # of raw_ptr<T>, and enable PartitionAlloc support for it. The _fake option
  # doesn't enable BRP, but pretends it's enabled for the synthetic field trial
  # (for testing purposes only).
  use_backup_ref_ptr = false
  use_backup_ref_ptr_fake = false

  # If BRP is enabled, additional options are available:
  # - put_ref_count_in_previous_slot: place the ref-count at the end of the
  #   previous slot (or in metadata if a slot starts on the page boundary), as
  #   opposed to the beginning of the slot.
  # - never_remove_from_brp_pool_blocklist: never remove super-pages from the
  #   BRP-pool block list.
  # - enable_backup_ref_ptr_slow_checks: enable additional safety checks that
  #   are too expensive to have on by default.
  enable_backup_ref_ptr_in_renderer_process = false
  put_ref_count_in_previous_slot = false
  never_remove_from_brp_pool_blocklist = false
  enable_backup_ref_ptr_slow_checks = false
}

if (!use_partition_alloc && use_allocator == "partition") {
  # If there is a conflict, prioritize |use_partition_alloc| over
  # |use_allocator|.
  use_allocator = "none"
}

assert(use_allocator == "none" || use_allocator == "tcmalloc" ||
       use_allocator == "partition")

assert(!is_win || use_allocator != "tcmalloc",
       "Tcmalloc doesn't work on Windows.")
assert(!is_mac || use_allocator != "tcmalloc",
       "Tcmalloc doesn't work on macOS.")
assert(!is_ios || use_allocator != "tcmalloc", "Tcmalloc doesn't work on iOS.")

assert(
    !use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
        is_fuchsia || is_apple,
    "use_allocator_shim works only on Android, iOS, Linux, macOS, Fuchsia, " +
        "and Windows.")

if (is_win && use_allocator_shim) {
  # TODO(crbug.com/1245317): Add a comment indicating why the shim doesn't work.
  assert(!is_component_build,
         "The allocator shim doesn't work for the component build on Windows.")
}

# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
# In theory, such a configuration is possible, but its scope would be limited to
# only Blink partitions, which is currently not tested. Better to trigger an
# error than have BackupRefPtr silently disabled while believing it is enabled.
if (!is_nacl) {
  assert(!use_backup_ref_ptr || use_allocator == "partition",
         "Can't use BackupRefPtr without PartitionAlloc-Everywhere")
}

# enable_backup_ref_ptr_in_renderer_process can only be used if
# use_backup_ref_ptr is true.
assert(
    use_backup_ref_ptr || !enable_backup_ref_ptr_in_renderer_process,
    "Can't enable BackupRefPtr in Renderer process if it isn't enabled at all")

# put_ref_count_in_previous_slot can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !put_ref_count_in_previous_slot,
    "Can't put ref count in the previous slot if BackupRefPtr isn't enabled at all")

# never_remove_from_brp_pool_blocklist can only be used if use_backup_ref_ptr
# is true.
assert(
    use_backup_ref_ptr || !never_remove_from_brp_pool_blocklist,
    "never_remove_from_brp_pool_blocklist requires BackupRefPtr to be enabled")

# enable_backup_ref_ptr_slow_checks can only be used if use_backup_ref_ptr
# is true.
assert(use_backup_ref_ptr || !enable_backup_ref_ptr_slow_checks,
       "Can't enable additional BackupRefPtr checks if it isn't enabled at all")
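The arguments declared above surface in C++ as generated buildflags (via base/allocator/buildflags.h), which is how the sources below branch on the configured allocator. A minimal sketch of that pattern, assuming the usual mapping of use_allocator onto the USE_TCMALLOC / USE_PARTITION_ALLOC_AS_MALLOC flags; the helper function itself is hypothetical, not part of this commit:

#include "base/allocator/buildflags.h"

// Hypothetical helper: names the allocator this build was configured with,
// based on the buildflags generated from the GN args above.
const char* ConfiguredAllocatorName() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  return "partition";  // use_allocator = "partition"
#elif BUILDFLAG(USE_TCMALLOC)
  return "tcmalloc";  // use_allocator = "tcmalloc"
#else
  return "none";  // the system allocator, possibly behind the shim
#endif
}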
47
src/base/allocator/allocator_check.cc
Normal file
@ -0,0 +1,47 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_check.h"

#include "base/allocator/buildflags.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include "base/allocator/winheap_stubs_win.h"
#endif

#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <malloc.h>
#endif

#if defined(OS_APPLE)
#include "base/allocator/allocator_interception_mac.h"
#endif

namespace base {
namespace allocator {

bool IsAllocatorInitialized() {
#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
  // Set by allocator_shim_override_ucrt_symbols_win.h when the
  // shimmed _set_new_mode() is called.
  return g_is_win_shim_layer_initialized;
#elif (defined(OS_LINUX) || defined(OS_CHROMEOS)) && \
    BUILDFLAG(USE_TCMALLOC) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // From third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h.
  // TODO(primiano): replace with an include once base can depend on allocator.
#define TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC 0xbeef42
  return (mallopt(TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC, 0) ==
          TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC);
#elif defined(OS_APPLE) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
    !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // From allocator_interception_mac.mm.
  return base::allocator::g_replaced_default_zone;
#else
  return true;
#endif
}

}  // namespace allocator
}  // namespace base
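A plausible use of this predicate (a sketch; the call site is illustrative, not part of this commit) is a start-up sanity check that crashes early if the configured allocator did not actually install:

#include "base/allocator/allocator_check.h"
#include "base/check.h"

void VerifyAllocatorSetup() {
  // If the shim, tcmalloc, or the replaced macOS zone failed to install,
  // fail loudly now rather than run with an unexpected allocator.
  CHECK(base::allocator::IsAllocatorInitialized());
}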
18
src/base/allocator/allocator_check.h
Normal file
@ -0,0 +1,18 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_

#include "base/base_export.h"

namespace base {
namespace allocator {

BASE_EXPORT bool IsAllocatorInitialized();

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
77
src/base/allocator/allocator_extension.cc
Normal file
@ -0,0 +1,77 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/check.h"

#if BUILDFLAG(USE_TCMALLOC)
#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
#endif

namespace base {
namespace allocator {

void ReleaseFreeMemory() {
#if BUILDFLAG(USE_TCMALLOC)
  ::MallocExtension::instance()->ReleaseFreeMemory();
#endif
}

bool GetNumericProperty(const char* name, size_t* value) {
#if BUILDFLAG(USE_TCMALLOC)
  return ::MallocExtension::instance()->GetNumericProperty(name, value);
#else
  return false;
#endif
}

bool SetNumericProperty(const char* name, size_t value) {
#if BUILDFLAG(USE_TCMALLOC)
  return ::MallocExtension::instance()->SetNumericProperty(name, value);
#else
  return false;
#endif
}

void GetHeapSample(std::string* writer) {
#if BUILDFLAG(USE_TCMALLOC)
  ::MallocExtension::instance()->GetHeapSample(writer);
#endif
}

bool IsHeapProfilerRunning() {
#if BUILDFLAG(USE_TCMALLOC) && defined(ENABLE_PROFILING)
  return ::IsHeapProfilerRunning();
#else
  return false;
#endif
}

void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
  // TODO(sque): Use allocator shim layer instead.
#if BUILDFLAG(USE_TCMALLOC)
  // Make sure no hooks get overwritten.
  auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
  if (alloc_hook)
    DCHECK(!prev_alloc_hook);

  auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
  if (free_hook)
    DCHECK(!prev_free_hook);
#endif
}

int GetCallStack(void** stack, int max_stack_size) {
#if BUILDFLAG(USE_TCMALLOC)
  return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
#else
  return 0;
#endif
}

}  // namespace allocator
}  // namespace base
67
src/base/allocator/allocator_extension.h
Normal file
@ -0,0 +1,67 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_

#include <stddef.h>  // for size_t
#include <string>

#include "base/base_export.h"
#include "build/build_config.h"

namespace base {
namespace allocator {

// Callback types for alloc and free.
using AllocHookFunc = void (*)(const void*, size_t);
using FreeHookFunc = void (*)(const void*);

// Request that the allocator release any free memory it knows about to the
// system.
BASE_EXPORT void ReleaseFreeMemory();

// Get the named property's |value|. Returns true if the property is known.
// Returns false if the property is not a valid property name for the current
// allocator implementation.
// |name| and |value| cannot be NULL.
BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);

// Set the named property's |value|. Returns true if the property is known and
// writable. Returns false if the property is not a valid property name for the
// current allocator implementation, or is not writable. |name| cannot be NULL.
BASE_EXPORT bool SetNumericProperty(const char* name, size_t value);

// Outputs to |writer| a sample of live objects and the stack traces
// that allocated these objects. The format of the returned output
// is equivalent to the output of the heap profiler and can
// therefore be passed to "pprof".
// NOTE: by default, the allocator does not do any heap sampling, and this
// function will always return an empty sample. To get useful
// data from GetHeapSample, you must also set the numeric property
// "tcmalloc.sampling_period_bytes" to a value such as 524288.
BASE_EXPORT void GetHeapSample(std::string* writer);

BASE_EXPORT bool IsHeapProfilerRunning();

// Register callbacks for alloc and free. Can only store one callback at a time
// for each of alloc and free.
BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);

// Attempts to unwind the call stack from the current location where this
// function is being called. Must be called from a hook function registered
// by calling SetSingle{Alloc,Free}Hook, directly or indirectly.
//
// Arguments:
//   stack:          pointer to a pre-allocated array of void*'s.
//   max_stack_size: indicates the size of the array in |stack|.
//
// Returns the number of call stack frames stored in |stack|, or 0 if no call
// stack information is available.
BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
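Tying the comments above together, a hedged sketch of collecting a heap sample on a tcmalloc build (the 524288 sampling period comes from the GetHeapSample comment above; the wrapper function is illustrative):

#include <string>

#include "base/allocator/allocator_extension.h"

std::string CollectHeapSample() {
  // Sampling is off by default; this returns false (and sampling stays off)
  // when the build does not use tcmalloc.
  base::allocator::SetNumericProperty("tcmalloc.sampling_period_bytes", 524288);

  std::string sample;
  base::allocator::GetHeapSample(&sample);  // pprof-compatible output
  return sample;
}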
65
src/base/allocator/allocator_interception_mac.h
Normal file
@ -0,0 +1,65 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_

#include <stddef.h>

#include "base/base_export.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

struct MallocZoneFunctions;

// This initializes AllocatorDispatch::default_dispatch by saving pointers to
// the functions in the current default malloc zone. This must be called before
// the default malloc zone is changed to have its intended effect.
void InitializeDefaultDispatchToMacAllocator();

// Saves the function pointers currently used by the default zone.
void StoreFunctionsForDefaultZone();

// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
void StoreFunctionsForAllZones();

// For all malloc zones that have been stored, replace their functions with
// |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);

extern bool g_replaced_default_zone;

// Calls the original implementation of malloc/calloc prior to interception.
bool UncheckedMallocMac(size_t size, void** result);
bool UncheckedCallocMac(size_t num_items, size_t size, void** result);

// Intercepts calls to default and purgeable malloc zones. Intercepts Core
// Foundation and Objective-C allocations.
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();

// Updates all malloc zones to use their original functions.
// Also calls ClearAllMallocZonesForTesting.
BASE_EXPORT void UninterceptMallocZonesForTesting();

// Returns true if allocations are successfully being intercepted for all
// malloc zones.
bool AreMallocZonesIntercepted();

// Periodically checks for, and shims, new malloc zones. Stops checking after
// 1 minute.
BASE_EXPORT void PeriodicallyShimNewMallocZones();

// Exposed for testing.
BASE_EXPORT void ShimNewMallocZones();
BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
                                      const MallocZoneFunctions* functions);

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
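As a usage note, the Unchecked*Mac helpers report failure instead of terminating on OOM, which is what makes them suitable for allocations that are allowed to fail. A sketch, assuming the buffer is released with free() since the helpers forward to the default zone:

#include <stdlib.h>

#include "base/allocator/allocator_interception_mac.h"

bool TryLargeAllocation(size_t size) {
  void* buffer = nullptr;
  if (!base::allocator::UncheckedMallocMac(size, &buffer))
    return false;  // Allocation failed; no OOM termination was triggered.
  // ... use |buffer| ...
  free(buffer);
  return true;
}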
612
src/base/allocator/allocator_interception_mac.mm
Normal file
@ -0,0 +1,612 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains all the logic necessary to intercept allocations on
// macOS. "malloc zones" are an abstraction that allows the process to
// intercept all malloc-related functions. There is no good mechanism [short of
// interposition] to determine when new malloc zones are added, so there's no
// clean mechanism to intercept all malloc zones. This file contains logic to
// intercept the default and purgeable zones, which always exist. A cursory
// review of Chrome seems to imply that non-default zones are almost never used.
//
// This file also contains logic to intercept Core Foundation and Objective-C
// allocations. The implementations forward to the default malloc zone, so the
// only reason to intercept these calls is to re-label OOM crashes with slightly
// more details.
#include "base/allocator/allocator_interception_mac.h"
|
||||
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#import <Foundation/Foundation.h>
|
||||
#include <errno.h>
|
||||
#include <mach/mach.h>
|
||||
#import <objc/runtime.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#include <new>
|
||||
|
||||
#include "base/allocator/buildflags.h"
|
||||
#include "base/allocator/malloc_zone_functions_mac.h"
|
||||
#include "base/bind.h"
|
||||
#include "base/bits.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/mac/mach_logging.h"
|
||||
#include "base/process/memory.h"
|
||||
#include "base/threading/sequenced_task_runner_handle.h"
|
||||
#include "build/build_config.h"
|
||||
#include "third_party/apple_apsl/CFBase.h"
|
||||
|
||||
#if defined(OS_IOS)
|
||||
#include "base/ios/ios_util.h"
|
||||
#else
|
||||
#include "base/mac/mac_util.h"
|
||||
#endif
|
||||
|
||||
namespace base {
|
||||
namespace allocator {
|
||||
|
||||
bool g_replaced_default_zone = false;
|
||||
|
||||
namespace {
|
||||
|
||||
bool g_oom_killer_enabled;
|
||||
bool g_allocator_shims_failed_to_install;
|
||||
|
||||
// Starting with Mac OS X 10.7, the zone allocators set up by the system are
|
||||
// read-only, to prevent them from being overwritten in an attack. However,
|
||||
// blindly unprotecting and reprotecting the zone allocators fails with
|
||||
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
|
||||
// memory in its bss. Explicit saving/restoring of the protection is required.
|
||||
//
|
||||
// This function takes a pointer to a malloc zone, de-protects it if necessary,
|
||||
// and returns (in the out parameters) a region of memory (if any) to be
|
||||
// re-protected when modifications are complete. This approach assumes that
|
||||
// there is no contention for the protection of this memory.
|
||||
//
|
||||
// Returns true if the malloc zone was properly de-protected, or false
|
||||
// otherwise. If this function returns false, the out parameters are invalid and
|
||||
// the region does not need to be re-protected.
|
||||
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
|
||||
vm_address_t* reprotection_start,
|
||||
vm_size_t* reprotection_length,
|
||||
vm_prot_t* reprotection_value) {
|
||||
mach_port_t unused;
|
||||
*reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
|
||||
struct vm_region_basic_info_64 info;
|
||||
mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
|
||||
kern_return_t result =
|
||||
vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
|
||||
VM_REGION_BASIC_INFO_64,
|
||||
reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
|
||||
if (result != KERN_SUCCESS) {
|
||||
MACH_LOG(ERROR, result) << "vm_region_64";
|
||||
return false;
|
||||
}
|
||||
|
||||
// The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
|
||||
// balance it with a deallocate in case this ever changes. See
|
||||
// the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
|
||||
// https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
|
||||
mach_port_deallocate(mach_task_self(), unused);
|
||||
|
||||
if (!(info.max_protection & VM_PROT_WRITE)) {
|
||||
LOG(ERROR) << "Invalid max_protection " << info.max_protection;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Does the region fully enclose the zone pointers? Possibly unwarranted
|
||||
// simplification used: using the size of a full version 10 malloc zone rather
|
||||
// than the actual smaller size if the passed-in zone is not version 10.
|
||||
DCHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone));
|
||||
vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
|
||||
reinterpret_cast<vm_address_t>(*reprotection_start);
|
||||
DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
|
||||
|
||||
if (info.protection & VM_PROT_WRITE) {
|
||||
// No change needed; the zone is already writable.
|
||||
*reprotection_start = 0;
|
||||
*reprotection_length = 0;
|
||||
*reprotection_value = VM_PROT_NONE;
|
||||
} else {
|
||||
*reprotection_value = info.protection;
|
||||
result =
|
||||
vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
|
||||
false, info.protection | VM_PROT_WRITE);
|
||||
if (result != KERN_SUCCESS) {
|
||||
MACH_LOG(ERROR, result) << "vm_protect";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#if !defined(ADDRESS_SANITIZER)

MallocZoneFunctions g_old_zone;
MallocZoneFunctions g_old_purgeable_zone;

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  void* result = g_old_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.malloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
  if (!result && num_items && size)
    TerminateBecauseOutOfMemory(num_items * size);
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* result = g_old_purgeable_zone.valloc(zone, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
  if (!result && size)
    TerminateBecauseOutOfMemory(size);
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned. See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  if (!result && size && alignment >= sizeof(void*) &&
      base::bits::IsPowerOfTwo(alignment)) {
    TerminateBecauseOutOfMemory(size);
  }
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

#if !defined(ADDRESS_SANITIZER)

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
#if defined(OS_IOS)
  return !base::ios::IsRunningOnOrLater(14, 0, 0);
#else
  return !base::mac::IsOSLaterThan12_DontCallThis();
#endif
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    TerminateBecauseOutOfMemory(alloc_size);
  return result;
}

#endif  // !defined(ADDRESS_SANITIZER)

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    TerminateBecauseOutOfMemory(0);
  return result;
}

void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
  if (!IsMallocZoneAlreadyStored(chrome_zone))
    return;
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  ReplaceZoneFunctions(chrome_zone, &functions);
}

}  // namespace
bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = malloc(size);
#else
  if (g_old_zone.malloc) {
    *result = g_old_zone.malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  *result = calloc(num_items, size);
#else
  if (g_old_zone.calloc) {
    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}

void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}

void StoreFunctionsForDefaultZone() {
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  StoreMallocZone(default_zone);
}

void StoreFunctionsForAllZones() {
  // This ensures that the default zone is always at the front of the array,
  // which is important for performance.
  StoreFunctionsForDefaultZone();

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    StoreMallocZone(zone);
  }
}

void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS)
    return;
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  g_replaced_default_zone = true;
}

void InterceptAllocationsMac() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see malloc_zone_malloc() in
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
  // for details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // The malloc zone backed by PartitionAlloc crashes by default, so there is
  // no need to install the OOM killer.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;

    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in termination. "
                     "http://crbug.com/45650";
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());
  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
  CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }

  ClearAllMallocZonesForTesting();
}

bool AreMallocZonesIntercepted() {
  return !g_allocator_shims_failed_to_install;
}

namespace {

void ShimNewMallocZonesAndReschedule(base::Time end_time,
                                     base::TimeDelta delay) {
  ShimNewMallocZones();

  if (base::Time::Now() > end_time)
    return;

  base::TimeDelta next_delay = delay * 2;
  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
      delay);
}

}  // namespace

void PeriodicallyShimNewMallocZones() {
  base::Time end_time = base::Time::Now() + base::TimeDelta::FromMinutes(1);
  base::TimeDelta initial_delay = base::TimeDelta::FromSeconds(1);
  ShimNewMallocZonesAndReschedule(end_time, initial_delay);
}

void ShimNewMallocZones() {
  StoreFunctionsForAllZones();

  // Use the functions for the default zone as a template to replace those
  // in the new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  DCHECK(IsMallocZoneAlreadyStored(default_zone));

  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}

void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  bool success = DeprotectMallocZone(zone, &reprotection_start,
                                     &reprotection_length, &reprotection_value);
  if (!success) {
    g_allocator_shims_failed_to_install = true;
    return;
  }

  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  if (functions->batch_malloc)
    zone->batch_malloc = functions->batch_malloc;
  if (functions->batch_free)
    zone->batch_free = functions->batch_free;
  if (functions->size)
    zone->size = functions->size;
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }

  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}

}  // namespace allocator
}  // namespace base
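A hedged sketch of how these entry points fit together at process start-up; the ordering follows the comments above, and the wrapper function is an assumption rather than code from this commit:

#include "base/allocator/allocator_interception_mac.h"

void InstallMacOOMKiller() {
  // Replace the default and purgeable zones (plus CFAllocator and
  // +[NSObject allocWithZone:]) so OOM terminates loudly.
  base::allocator::InterceptAllocationsMac();

  // Catch zones created later (e.g. by frameworks). Requires a sequenced
  // task runner, since the rechecks are posted as delayed tasks.
  base::allocator::PeriodicallyShimNewMallocZones();
}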
412
src/base/allocator/allocator_shim.cc
Normal file
@ -0,0 +1,412 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <new>

#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !defined(OS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if defined(OS_APPLE)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.

namespace {
std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
    &base::allocator::AllocatorDispatch::default_dispatch};

bool g_call_new_handler_on_malloc_failure = false;

ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if defined(OS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}

ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace
namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
}

void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function is guaranteed to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}

void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
extern "C" {
// The general pattern for allocations is:
// - Try to allocate; if it succeeded, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true).
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just abort() after printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the alloc.
ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if defined(OS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if defined(OS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if defined(OS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if defined(OS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      !base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}

ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = base::bits::AlignUp(size, GetCachedPageSize());
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}

ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"
#if !defined(OS_WIN) && \
    !(defined(OS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer
// except on Windows and macOS (except for PartitionAlloc-Everywhere) where the
// malloc intercept is deep enough that it also catches the cpp calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid crashes
// in case of operator new() noexcept. Thus, operator new() noexcept needs to
// be routed to base::internal::PartitionMallocUnchecked through the shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if defined(OS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
#elif defined(OS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif defined(OS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// In the case of tcmalloc we also want to plumb into the glibc hooks, so that
// allocations made in glibc itself (e.g., strdup()) don't accidentally get
// performed on the glibc heap.
//
// More details:
// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that both
// the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, both tcmalloc (in
// libc_override_glibc.h, see in third_party/tcmalloc) and LLVM for LSAN use the
// same mechanism.

#if defined(LIBC_GLIBC) && \
    (BUILDFLAG(USE_TCMALLOC) || BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if defined(OS_APPLE)
namespace base {
namespace allocator {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif
179
src/base/allocator/allocator_shim.h
Normal file
@ -0,0 +1,179 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_

#include <stddef.h>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "build/build_config.h"

namespace base {
namespace allocator {

// Allocator Shim API. It allows one to:
//  - Configure the behavior of the allocator (what to do on OOM failures).
//  - Install new hooks (AllocatorDispatch) in the allocator chain.

// When this shim layer is enabled, the route of an allocation is as follows:
//
// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
//   The override_* headers define the symbols required to intercept calls to
//   malloc() and operator new (if not overridden by specific C++ classes).
//
// [allocator_shim.cc] Routing allocation calls to the shim:
//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
//   ShimCppNew() etc. methods defined in allocator_shim.cc.
//   These methods will: (1) forward the allocation call to the front of the
//   AllocatorDispatch chain. (2) perform security hardening (e.g., might
//   call std::new_handler on OOM failure).
//
// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
//   It is a singly linked list where each element is a struct with function
//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
//   consists of a single AllocatorDispatch element, herein called
//   the "default dispatch", which is statically defined at build time and
//   ultimately routes the calls to the actual allocator defined by the build
//   config (tcmalloc, glibc, ...).
//
//   It is possible to dynamically insert further AllocatorDispatch stages
//   to the front of the chain, for debugging / profiling purposes.
//
//   All the functions must be thread safe. The shim does not enforce any
//   serialization. This is to route to thread-aware allocators (e.g.,
//   tcmalloc) without introducing unnecessary perf hits.

struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self,
                        size_t size,
                        void* context);
  using AllocUncheckedFn = void*(const AllocatorDispatch* self,
                                 size_t size,
                                 void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                       size_t n,
                                       size_t size,
                                       void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self,
                               size_t alignment,
                               size_t size,
                               void* context);
  using ReallocFn = void*(const AllocatorDispatch* self,
                          void* address,
                          size_t size,
                          void* context);
  using FreeFn = void(const AllocatorDispatch* self,
                      void* address,
                      void* context);
  // Returns the allocated size of user data (not including heap overhead).
  // Can be larger than the requested size.
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address,
                                   void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
                                 size_t size,
                                 void** results,
                                 unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self,
                           void** to_be_freed,
                           unsigned num_to_be_freed,
                           void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
                                  void* ptr,
                                  size_t size,
                                  void* context);
  using AlignedMallocFn = void*(const AllocatorDispatch* self,
                                size_t size,
                                size_t alignment,
                                void* context);
  using AlignedReallocFn = void*(const AllocatorDispatch* self,
                                 void* address,
                                 size_t size,
                                 size_t alignment,
                                 void* context);
  using AlignedFreeFn = void(const AllocatorDispatch* self,
                             void* address,
                             void* context);

  AllocFn* const alloc_function;
  AllocUncheckedFn* const alloc_unchecked_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  // batch_malloc, batch_free, and free_definite_size are specific to the OSX
  // and iOS allocators.
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;
  // _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
  // Windows allocator.
  AlignedMallocFn* const aligned_malloc_function;
  AlignedReallocFn* const aligned_realloc_function;
  AlignedFreeFn* const aligned_free_function;

  const AllocatorDispatch* next;

  // |default_dispatch| is statically defined by one (and only one) of the
  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
  // configuration.
  static const AllocatorDispatch default_dispatch;
};

// When true, makes malloc behave like new, w.r.t. calling the new_handler if
// the allocation fails (see set_new_mode() in Windows).
BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);

// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
// regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);

// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t. concurrent invocations of InsertAllocatorDispatch().
// Callers are responsible for not inserting a given dispatch more than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);

// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
// removal of arbitrary elements from a singly linked list would require a lock
// in malloc(), which we really don't want.
BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);

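// As an illustrative sketch of how a dynamically inserted dispatch stage can
// be written (hypothetical names; only alloc_function and free_function are
// shown, the remaining members would forward to |next| in the same way):
//
//   void* CountingAlloc(const AllocatorDispatch* self,
//                       size_t size,
//                       void* context) {
//     g_alloc_count.fetch_add(1, std::memory_order_relaxed);
//     return self->next->alloc_function(self->next, size, context);
//   }
//
//   void CountingFree(const AllocatorDispatch* self,
//                     void* address,
//                     void* context) {
//     self->next->free_function(self->next, address, context);
//   }
//
//   AllocatorDispatch g_counting_dispatch = {
//       &CountingAlloc, /* ... */, &CountingFree, /* ... */,
//       nullptr /* next; filled in by InsertAllocatorDispatch() */};
//   base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);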
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OS_WIN)
// Configures the allocator for the caller's allocation domain. Allocations
// that take place prior to this configuration step will succeed, but will not
// benefit from its one-time mitigations. As such, this function must be called
// as early as possible during startup.
BASE_EXPORT void ConfigurePartitionAlloc();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OS_WIN)

#if defined(OS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void InitializeDefaultAllocatorPartitionRoot();
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// On macOS, the allocator shim needs to be turned on during runtime.
BASE_EXPORT void InitializeAllocatorShim();
#endif  // defined(OS_APPLE)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();

BASE_EXPORT void ReconfigurePartitionAllocLazyCommit();

BASE_EXPORT void ConfigurePartitionRefCountSupport(bool enable_ref_count);
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void EnablePCScan(bool dcscan);
#endif

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
@ -0,0 +1,89 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"
#include "base/compiler_specific.h"

#include <dlfcn.h>
#include <malloc.h>

// This translation unit defines a default dispatch for the allocator shim
// which routes allocations to libc functions.
// The code here is strongly inspired by tcmalloc's libc_override_glibc.h.

extern "C" {
void* __libc_malloc(size_t size);
void* __libc_calloc(size_t n, size_t size);
void* __libc_realloc(void* address, size_t size);
void* __libc_memalign(size_t alignment, size_t size);
void __libc_free(void* ptr);
}  // extern "C"

namespace {

using base::allocator::AllocatorDispatch;

void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __libc_malloc(size);
}

void* GlibcCalloc(const AllocatorDispatch*,
                  size_t n,
                  size_t size,
                  void* context) {
  return __libc_calloc(n, size);
}

void* GlibcRealloc(const AllocatorDispatch*,
                   void* address,
                   size_t size,
                   void* context) {
  return __libc_realloc(address, size);
}

void* GlibcMemalign(const AllocatorDispatch*,
                    size_t alignment,
                    size_t size,
                    void* context) {
  return __libc_memalign(alignment, size);
}

void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
  __libc_free(address);
}

NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  // glibc does not expose an alias to resolve malloc_usable_size. Dynamically
  // resolve it instead. This should be safe because glibc (and hence dlfcn)
  // does not use malloc_size internally and so there should not be a risk of
  // recursion.
  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
  static MallocUsableSizeFunction fn_ptr =
      reinterpret_cast<MallocUsableSizeFunction>(
          dlsym(RTLD_NEXT, "malloc_usable_size"));

  return fn_ptr(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &GlibcMalloc,          /* alloc_function */
    &GlibcMalloc,          /* alloc_unchecked_function */
    &GlibcCalloc,          /* alloc_zero_initialized_function */
    &GlibcMemalign,        /* alloc_aligned_function */
    &GlibcRealloc,         /* realloc_function */
    &GlibcFree,            /* free_function */
    &GlibcGetSizeEstimate, /* get_size_estimate_function */
    nullptr,               /* batch_malloc_function */
    nullptr,               /* batch_free_function */
    nullptr,               /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};
@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <malloc.h>

#include "base/allocator/allocator_shim.h"
#include "build/build_config.h"

// This translation unit defines a default dispatch for the allocator shim
// which routes allocations to the original libc functions when using the
// link-time -Wl,-wrap,malloc approach (see README.md).
// The __real_X functions here are special symbols that the linker will
// relocate against the real "X" undefined symbol, so that __real_malloc
// becomes the equivalent of what an undefined malloc symbol reference would
// have been.
// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
// which routes the __wrap_X functions into the shim.

extern "C" {
void* __real_malloc(size_t);
void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
void __real_free(void*);
}  // extern "C"
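// To make the -wrap mechanism concrete: building with -Wl,-wrap,malloc makes
// the linker resolve every reference to malloc() as __wrap_malloc(), while
// __real_malloc() resolves to the original malloc(). A minimal sketch of the
// wrapped side (the real definitions live in
// allocator_shim_override_linker_wrapped_symbols.h; ShimMalloc is the shim
// entry point defined in allocator_shim.cc):
//
//   extern "C" SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
//     return ShimMalloc(size, nullptr);
//   }
//
// RealMalloc() below then serves as the tail of the dispatch chain, so a
// shimmed malloc() call ultimately lands in the original allocator.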

namespace {

using base::allocator::AllocatorDispatch;

void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return __real_malloc(size);
}

void* RealCalloc(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  return __real_calloc(n, size);
}

void* RealRealloc(const AllocatorDispatch*,
                  void* address,
                  size_t size,
                  void* context) {
  return __real_realloc(address, size);
}

void* RealMemalign(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  return __real_memalign(alignment, size);
}

void RealFree(const AllocatorDispatch*, void* address, void* context) {
  __real_free(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &RealMalloc,   /* alloc_function */
    &RealMalloc,   /* alloc_unchecked_function */
    &RealCalloc,   /* alloc_zero_initialized_function */
    &RealMemalign, /* alloc_aligned_function */
    &RealRealloc,  /* realloc_function */
    &RealFree,     /* free_function */
    nullptr,       /* get_size_estimate_function */
    nullptr,       /* batch_malloc_function */
    nullptr,       /* batch_free_function */
    nullptr,       /* free_definite_size_function */
    nullptr,       /* aligned_malloc_function */
    nullptr,       /* aligned_realloc_function */
    nullptr,       /* aligned_free_function */
    nullptr,       /* next */
};
@ -0,0 +1,107 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/malloc_zone_functions_mac.h"

namespace base {
namespace allocator {
namespace {
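// In this dispatch the opaque |context| parameter carries the
// _malloc_zone_t* on which the intercepted call was made;
// GetFunctionsForZone(context) returns the original (pre-interception)
// function table for that zone, so each request below is forwarded to the
// very zone that received it rather than to a single global allocator.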

void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                          size);
}

void* CallocImpl(const AllocatorDispatch*,
                 size_t n,
                 size_t size,
                 void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
                          size);
}

void* MemalignImpl(const AllocatorDispatch*,
                   size_t alignment,
                   size_t size,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
                            alignment, size);
}

void* ReallocImpl(const AllocatorDispatch*,
                  void* ptr,
                  size_t size,
                  void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
                           ptr, size);
}

void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
}

unsigned BatchMallocImpl(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  return functions.batch_malloc(
      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
      num_requested);
}

void BatchFreeImpl(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
                       to_be_freed, num_to_be_freed);
}

void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  MallocZoneFunctions& functions = GetFunctionsForZone(context);
  functions.free_definite_size(
      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &MallocImpl,           /* alloc_function */
    &MallocImpl,           /* alloc_unchecked_function */
    &CallocImpl,           /* alloc_zero_initialized_function */
    &MemalignImpl,         /* alloc_aligned_function */
    &ReallocImpl,          /* realloc_function */
    &FreeImpl,             /* free_function */
    &GetSizeEstimateImpl,  /* get_size_estimate_function */
    &BatchMallocImpl,      /* batch_malloc_function */
    &BatchFreeImpl,        /* batch_free_function */
    &FreeDefiniteSizeImpl, /* free_definite_size_function */
    nullptr,               /* aligned_malloc_function */
    nullptr,               /* aligned_realloc_function */
    nullptr,               /* aligned_free_function */
    nullptr,               /* next */
};

}  // namespace allocator
}  // namespace base
@ -0,0 +1,638 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"

#include <atomic>
#include <cstddef>

#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/memory/nonscannable_memory.h"
#include "base/no_destructor.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"

#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <malloc.h>
#endif

#if defined(OS_WIN) && defined(ARCH_CPU_X86)
#include <windows.h>
#endif

using base::allocator::AllocatorDispatch;

namespace {

class SimpleScopedSpinLocker {
 public:
  explicit SimpleScopedSpinLocker(std::atomic<bool>& lock) : lock_(lock) {
    // Lock. Semantically equivalent to base::Lock::Acquire().
    bool expected = false;
    // Weak CAS since we are in a retry loop, relaxed ordering for failure
    // since in this case we don't imply any ordering.
    //
    // This matches the partition_allocator/spinning_mutex.h fast path on
    // Linux.
    while (!lock_.compare_exchange_weak(expected, true,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed)) {
      expected = false;
    }
  }

  ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); }

 private:
  std::atomic<bool>& lock_;
};

// We can't use a "static local" or a base::LazyInstance, as:
// - static local variables call into the runtime on Windows, which is not
//   prepared to handle it, as the first allocation happens during CRT init.
// - We don't want to depend on base::LazyInstance, which may be converted to
//   static locals one day.
//
// Nevertheless, this provides essentially the same thing.
template <typename T, typename Constructor>
class LeakySingleton {
 public:
  constexpr LeakySingleton() = default;

  ALWAYS_INLINE T* Get() {
    auto* instance = instance_.load(std::memory_order_acquire);
    if (LIKELY(instance))
      return instance;

    return GetSlowPath();
  }

  // Replaces the instance pointer with a new one.
  void Replace(T* new_instance) {
    SimpleScopedSpinLocker scoped_lock{initialization_lock_};

    // Modify under the lock to avoid race between |if (instance)| and
    // |instance_.store()| in GetSlowPath().
    instance_.store(new_instance, std::memory_order_release);
  }

 private:
  T* GetSlowPath();

  std::atomic<T*> instance_;
  alignas(T) uint8_t instance_buffer_[sizeof(T)];
  std::atomic<bool> initialization_lock_;
};

template <typename T, typename Constructor>
T* LeakySingleton<T, Constructor>::GetSlowPath() {
  // The instance has not been set; the proper way to proceed (correct
  // double-checked locking) is:
  //
  // auto* instance = instance_.load(std::memory_order_acquire);
  // if (!instance) {
  //   ScopedLock initialization_lock;
  //   root = instance_.load(std::memory_order_relaxed);
  //   if (root)
  //     return root;
  //   instance = Create new root;
  //   instance_.store(instance, std::memory_order_release);
  //   return instance;
  // }
  //
  // However, we don't want to use a base::Lock here, so instead we use
  // compare-and-exchange on a lock variable, which provides the same
  // guarantees.
  SimpleScopedSpinLocker scoped_lock{initialization_lock_};

  T* instance = instance_.load(std::memory_order_relaxed);
  // Someone beat us.
  if (instance)
    return instance;

  instance = Constructor::New(reinterpret_cast<void*>(instance_buffer_));
  instance_.store(instance, std::memory_order_release);

  return instance;
}

class MainPartitionConstructor {
 public:
  static base::ThreadSafePartitionRoot* New(void* buffer) {
    constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
        // With USE_BACKUP_REF_PTR, this partition is only temporary until a
        // BRP-enabled partition is created later. Leave the ability to have
        // a thread cache to that partition.
        base::PartitionOptions::ThreadCache::kDisabled;
#else
        base::PartitionOptions::ThreadCache::kEnabled;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
#else   // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
        // Other tests, such as the ThreadCache tests, create a thread cache,
        // and only one is supported at a time.
        base::PartitionOptions::ThreadCache::kDisabled;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    auto* new_root = new (buffer) base::ThreadSafePartitionRoot({
        base::PartitionOptions::AlignedAlloc::kAllowed,
        thread_cache,
        base::PartitionOptions::Quarantine::kAllowed,
        base::PartitionOptions::Cookie::kAllowed,
        base::PartitionOptions::RefCount::kDisallowed,
    });

    return new_root;
  }
};

LeakySingleton<base::ThreadSafePartitionRoot, MainPartitionConstructor> g_root
    CONSTINIT = {};
base::ThreadSafePartitionRoot* Allocator() {
  return g_root.Get();
}

// Original g_root_ if it was replaced by ConfigurePartitionRefCountSupport().
std::atomic<base::ThreadSafePartitionRoot*> g_original_root(nullptr);

class AlignedPartitionConstructor {
 public:
  static base::ThreadSafePartitionRoot* New(void* buffer) {
    return g_root.Get();
  }
};

LeakySingleton<base::ThreadSafePartitionRoot, AlignedPartitionConstructor>
    g_aligned_root CONSTINIT = {};

base::ThreadSafePartitionRoot* OriginalAllocator() {
  return g_original_root.load(std::memory_order_relaxed);
}

base::ThreadSafePartitionRoot* AlignedAllocator() {
  return g_aligned_root.Get();
}

#if defined(OS_WIN) && defined(ARCH_CPU_X86)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool IsRunning32bitEmulatedOnArm64() {
  using IsWow64Process2Function = decltype(&IsWow64Process2);

  IsWow64Process2Function is_wow64_process2 =
      reinterpret_cast<IsWow64Process2Function>(::GetProcAddress(
          ::GetModuleHandleA("kernel32.dll"), "IsWow64Process2"));
  if (!is_wow64_process2)
    return false;
  USHORT process_machine;
  USHORT native_machine;
  bool retval = is_wow64_process2(::GetCurrentProcess(), &process_machine,
                                  &native_machine);
  if (!retval)
    return false;
  if (native_machine == IMAGE_FILE_MACHINE_ARM64)
    return true;
  return false;
}
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// The number of bytes to add to every allocation. Ordinarily zero, but set to
// 8 when emulating an x86 on ARM64 to avoid a bug in the Windows x86
// emulator.
size_t g_extra_bytes;
#endif  // defined(OS_WIN) && defined(ARCH_CPU_X86)

// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if defined(OS_WIN) && defined(ARCH_CPU_X86)
  return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else   // defined(OS_WIN) && defined(ARCH_CPU_X86)
  return size;
#endif  // defined(OS_WIN) && defined(ARCH_CPU_X86)
}

void* AllocateAlignedMemory(size_t alignment, size_t size) {
  // Memory returned by the regular allocator *always* respects |kAlignment|,
  // which is a power of two, and any valid alignment is also a power of two.
  // So we can directly fulfill these requests with the main allocator.
  //
  // This has several advantages:
  // - The thread cache is supported on the main partition
  // - Reduced fragmentation
  // - Better coverage for MiraclePtr variants requiring extras
  //
  // There are several call sites in Chromium where base::AlignedAlloc is
  // called with a small alignment. Some may be due to overly-careful code,
  // some are because the client code doesn't know the required alignment at
  // compile time.
  //
  // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
  // instance) directly call PartitionFree(), so there is no risk of mismatch.
  // (see the default_dispatch definition below).
  if (alignment <= base::kAlignment) {
    // This is mandated by |posix_memalign()| and friends, so should never
    // fire.
    PA_CHECK(base::bits::IsPowerOfTwo(alignment));
    // TODO(bartekn): See if the compiler optimizes branches down the stack on
    // Mac, where PartitionPageSize() isn't constexpr.
    return Allocator()->AllocFlagsNoHooks(0, size, base::PartitionPageSize());
  }

  return AlignedAllocator()->AlignedAllocFlags(base::PartitionAllocNoHooks,
                                               alignment, size);
}
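// A worked example of the routing rule above (values are assumptions for
// illustration): posix_memalign(&p, 8, 100), with alignment 8 <=
// base::kAlignment (16 on typical 64-bit builds), is served directly by the
// main partition, while _aligned_malloc(100, 64) on Windows, with alignment
// 64 > base::kAlignment, is routed to AlignedAllocator().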

}  // namespace

namespace base {
namespace internal {

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return Allocator()->AllocFlagsNoHooks(0, MaybeAdjustSize(size),
                                        PartitionPageSize());
}

void* PartitionMallocUnchecked(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  return Allocator()->AllocFlagsNoHooks(base::PartitionAllocReturnNull,
                                        MaybeAdjustSize(size),
                                        PartitionPageSize());
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
  return Allocator()->AllocFlagsNoHooks(base::PartitionAllocZeroFill, total,
                                        PartitionPageSize());
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  return AllocateAlignedMemory(alignment, size);
}

void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch,
                            size_t size,
                            size_t alignment,
                            void* context) {
  return AllocateAlignedMemory(alignment, size);
}

// aligned_realloc documentation is at
// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc
// TODO(tasak): Expand the given memory block to the given size if possible.
// This realloc always frees the original memory block and allocates a new
// memory block.
// TODO(tasak): Implement PartitionRoot<thread_safe>::AlignedReallocFlags and
// use it.
void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  void* new_ptr = nullptr;
  if (size > 0) {
    size = MaybeAdjustSize(size);
    new_ptr = AllocateAlignedMemory(alignment, size);
  } else {
    // size == 0 and address != null means just "free(address)".
    if (address)
      base::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  // The original memory block (specified by address) is unchanged if ENOMEM.
  if (!new_ptr)
    return nullptr;
  // TODO(tasak): Need to compare the new alignment with the address'
  // alignment. If the two alignments are not the same, need to return nullptr
  // with EINVAL.
  if (address) {
    size_t usage = base::ThreadSafePartitionRoot::GetUsableSize(address);
    size_t copy_size = usage > size ? size : usage;
    memcpy(new_ptr, address, copy_size);

    base::ThreadSafePartitionRoot::FreeNoHooks(address);
  }
  return new_ptr;
}

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
#if defined(OS_APPLE)
  if (UNLIKELY(!base::IsManagedByPartitionAlloc(address) && address)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `realloc`, which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return realloc(address, size);
  }
#endif  // defined(OS_APPLE)

  return Allocator()->ReallocFlags(base::PartitionAllocNoHooks, address,
                                   MaybeAdjustSize(size), "");
}

void PartitionFree(const AllocatorDispatch*, void* address, void* context) {
#if defined(OS_APPLE)
  if (UNLIKELY(!base::IsManagedByPartitionAlloc(address) && address)) {
    // A memory region allocated by the system allocator is passed in this
    // function. Forward the request to `free`, which supports zone-
    // dispatching so that it appropriately selects the right zone.
    return free(address);
  }
#endif  // defined(OS_APPLE)

  base::ThreadSafePartitionRoot::FreeNoHooks(address);
}

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  PA_DCHECK(address);

#if defined(OS_APPLE)
  if (!base::IsManagedByPartitionAlloc(address)) {
    // The object pointed to by `address` is not allocated by PartitionAlloc.
    // The return value `0` means that the pointer does not belong to this
    // malloc zone.
    return 0;
  }
#endif  // defined(OS_APPLE)

  // TODO(lizeb): Returns incorrect values for aligned allocations.
  const size_t size = base::ThreadSafePartitionRoot::GetUsableSize(address);
#if defined(OS_APPLE)
  // The object pointed to by `address` is allocated by PartitionAlloc, so
  // this function must not return zero, so that the malloc zone dispatcher
  // finds the appropriate malloc zone.
  PA_DCHECK(size);
#endif  // defined(OS_APPLE)
  return size;
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
  return ::Allocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
  return ::OriginalAllocator();
}

// static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
  return ::AlignedAllocator();
}

}  // namespace internal
}  // namespace base

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace base {
namespace allocator {

void EnablePartitionAllocMemoryReclaimer() {
  // Unlike other partitions, Allocator() and AlignedAllocator() do not
  // register their PartitionRoots to the memory reclaimer, because doing so
  // may allocate memory. Thus, the registration to the memory reclaimer has
  // to be done some time later, when the main root is fully configured.
  // TODO(bartekn): Aligned allocator can use the regular initialization path.
  PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(Allocator());
  auto* original_root = OriginalAllocator();
  if (original_root)
    PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(original_root);
  if (AlignedAllocator() != Allocator()) {
    PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
        AlignedAllocator());
  }
}

void ReconfigurePartitionAllocLazyCommit() {
  // Unlike other partitions, Allocator() and AlignedAllocator() do not
  // configure lazy commit upfront, because it uses base::Feature, which in
  // turn allocates memory. Thus, lazy commit configuration has to be done
  // after base::FeatureList is initialized.
  // TODO(bartekn): Aligned allocator can use the regular initialization path.
  Allocator()->ConfigureLazyCommit();
  auto* original_root = OriginalAllocator();
  if (original_root)
    original_root->ConfigureLazyCommit();
  AlignedAllocator()->ConfigureLazyCommit();
}

#if BUILDFLAG(USE_BACKUP_REF_PTR)
alignas(base::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_ref_count_config[sizeof(
        base::ThreadSafePartitionRoot)];

#if BUILDFLAG(USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP)
alignas(base::ThreadSafePartitionRoot) uint8_t
    g_allocator_buffer_for_aligned_alloc_partition[sizeof(
        base::ThreadSafePartitionRoot)];
#endif

void ConfigurePartitionRefCountSupport(bool enable_ref_count) {
  auto* current_root = g_root.Get();
  // Call Get() to ensure g_aligned_root gets initialized. In some cases it is
  // initialized with g_root, and we want to make sure it is the pre-swap
  // value (unless explicitly overwritten below).
  auto* current_aligned_root = g_aligned_root.Get();

  current_root->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
                            PartitionPurgeDiscardUnusedSystemPages);

  const bool allow_aligned_alloc_in_main_root =
#if BUILDFLAG(USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP)
      // If ref-count is to be enabled, this partition can't support
      // AlignedAlloc. Instead, a new one is created below. Otherwise,
      // no separate AlignedAlloc partition is created, so this one must
      // support it.
      !enable_ref_count;
#else
      // No separate AlignedAlloc partition is created, so this one must
      // support it.
      true;
#endif
  // TODO(bartekn): When enable_ref_count is false, simply enable thread cache
  // in the existing root instead of creating a new one -- the only difference
  // between the current and new partition is the thread cache setting.
  auto* new_root = new (g_allocator_buffer_for_ref_count_config)
      base::ThreadSafePartitionRoot({
          allow_aligned_alloc_in_main_root
              ? base::PartitionOptions::AlignedAlloc::kAllowed
              : base::PartitionOptions::AlignedAlloc::kDisallowed,
          base::PartitionOptions::ThreadCache::kEnabled,
          base::PartitionOptions::Quarantine::kAllowed,
          base::PartitionOptions::Cookie::kAllowed,
          enable_ref_count ? base::PartitionOptions::RefCount::kAllowed
                           : base::PartitionOptions::RefCount::kDisallowed,
      });
  g_root.Replace(new_root);
  // g_original_root has to be set after g_root, because other code doesn't
  // handle both pointing to the same root well.
  // TODO(bartekn): Reorder, once handled well. It isn't ideal for one
  // partition to be invisible temporarily.
  // TODO(bartekn): Move current_root->PurgeMemory after the replacement.
  g_original_root = current_root;

  base::ThreadSafePartitionRoot* new_aligned_root =
#if BUILDFLAG(USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP)
      enable_ref_count
          // If BRP is getting enabled, we need to create a new AlignedAlloc
          // partition now.
          // TODO(bartekn): Use the original root instead of creating a new
          // one. It'd result in one less partition, but come at a cost of
          // commingling types.
          ? new (g_allocator_buffer_for_aligned_alloc_partition)
                base::ThreadSafePartitionRoot({
                    base::PartitionOptions::AlignedAlloc::kAllowed,
                    base::PartitionOptions::ThreadCache::kDisabled,
                    base::PartitionOptions::Quarantine::kAllowed,
                    base::PartitionOptions::Cookie::kAllowed,
                    base::PartitionOptions::RefCount::kDisallowed,
                })
          // Otherwise, the new main root can also support AlignedAlloc.
          : g_root.Get();
  PA_CHECK(current_aligned_root == g_original_root);
#else   // USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP
      // The new main root can also support AlignedAlloc.
      g_root.Get();
  PA_CHECK(current_aligned_root == g_original_root);
#endif  // USE_DEDICATED_PARTITION_FOR_ALIGNED_ALLOC_UPON_ENABLING_BRP
  g_aligned_root.Replace(new_aligned_root);
  // No need for g_original_aligned_root, because in cases where g_aligned_root
  // is replaced, it must've been g_original_root.
}
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

#if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(bool dcscan) {
  internal::PCScan::Initialize(
      dcscan ? internal::PCScan::WantedWriteProtectionMode::kEnabled
             : internal::PCScan::WantedWriteProtectionMode::kDisabled);
  internal::PCScan::RegisterScannableRoot(Allocator());
  if (Allocator() != AlignedAllocator())
    internal::PCScan::RegisterScannableRoot(AlignedAllocator());
  internal::NonScannableAllocator::Instance().EnablePCScan();
}
#endif  // defined(PA_ALLOW_PCSCAN)

#if defined(OS_WIN)
// Call this as soon as possible during startup.
void ConfigurePartitionAlloc() {
#if defined(ARCH_CPU_X86)
  if (IsRunning32bitEmulatedOnArm64())
    g_extra_bytes = 8;
#endif  // defined(ARCH_CPU_X86)
}
#endif  // defined(OS_WIN)
}  // namespace allocator
}  // namespace base

constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &base::internal::PartitionMalloc,           // alloc_function
    &base::internal::PartitionMallocUnchecked,  // alloc_unchecked_function
    &base::internal::PartitionCalloc,    // alloc_zero_initialized_function
    &base::internal::PartitionMemalign,  // alloc_aligned_function
    &base::internal::PartitionRealloc,   // realloc_function
    &base::internal::PartitionFree,      // free_function
    &base::internal::PartitionGetSizeEstimate,  // get_size_estimate_function
    nullptr,                                    // batch_malloc_function
    nullptr,                                    // batch_free_function
    nullptr,                                    // free_definite_size_function
    &base::internal::PartitionAlignedAlloc,    // aligned_malloc_function
    &base::internal::PartitionAlignedRealloc,  // aligned_realloc_function
    &base::internal::PartitionFree,            // aligned_free_function
    nullptr,                                   // next
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.

extern "C" {

#if !defined(OS_APPLE)

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#endif  // !defined(OS_APPLE)

#if defined(OS_LINUX) || defined(OS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  base::SimplePartitionStatsDumper allocator_dumper;
  Allocator()->DumpStats("malloc", true, &allocator_dumper);
  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.

  base::SimplePartitionStatsDumper aligned_allocator_dumper;
  if (AlignedAllocator() != Allocator()) {
    AlignedAllocator()->DumpStats("posix_memalign", true,
                                  &aligned_allocator_dumper);
  }

  // Dump stats for nonscannable allocators.
  auto& nonscannable_allocator =
      base::internal::NonScannableAllocator::Instance();
  base::SimplePartitionStatsDumper nonscannable_allocator_dumper;
  if (auto* nonscannable_root = nonscannable_allocator.root())
    nonscannable_root->DumpStats("malloc", true,
                                 &nonscannable_allocator_dumper);

  struct mallinfo info = {0};
  info.arena = 0;  // Memory *not* allocated with mmap().

  // Memory allocated with mmap(), aka virtual size.
  info.hblks = allocator_dumper.stats().total_mmapped_bytes +
               aligned_allocator_dumper.stats().total_mmapped_bytes +
               nonscannable_allocator_dumper.stats().total_mmapped_bytes;
  // Resident bytes.
  info.hblkhd = allocator_dumper.stats().total_resident_bytes +
                aligned_allocator_dumper.stats().total_resident_bytes +
                nonscannable_allocator_dumper.stats().total_resident_bytes;
  // Allocated bytes.
  info.uordblks = allocator_dumper.stats().total_active_bytes +
                  aligned_allocator_dumper.stats().total_active_bytes +
                  nonscannable_allocator_dumper.stats().total_active_bytes;

  return info;
}
#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS)

}  // extern "C"
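// For reference, a caller can observe the shim-backed allocator through the
// standard mallinfo() interface on Linux / Chrome OS. A minimal sketch (field
// meanings as assigned above, which deliberately repurposes the glibc
// struct):
//
//   #include <malloc.h>
//   #include <stdio.h>
//
//   void LogHeapStats() {
//     struct mallinfo info = mallinfo();
//     // info.hblks:    mmapped (virtual) bytes; info.hblkhd: resident bytes;
//     // info.uordblks: allocated (active) bytes.
//     fprintf(stderr, "virtual=%d resident=%d allocated=%d\n", info.hblks,
//             info.hblkhd, info.uordblks);
//   }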

#if defined(OS_APPLE)

namespace base {
namespace allocator {

void InitializeDefaultAllocatorPartitionRoot() {
  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
  // internally, e.g. __builtin_available, and it's not easy to avoid it.
  // Thus, we initialize the PartitionRoot by using the system default
  // allocator before we intercept the system default allocator.
  ignore_result(Allocator());
}

}  // namespace allocator
}  // namespace base

#endif  // defined(OS_APPLE)

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@ -0,0 +1,73 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_

#include "base/allocator/allocator_shim.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/base_export.h"

namespace base {
namespace internal {

class BASE_EXPORT PartitionAllocMalloc {
 public:
  static ThreadSafePartitionRoot* Allocator();
  // May return |nullptr|, will never return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* OriginalAllocator();
  // May return the same pointer as |Allocator()|.
  static ThreadSafePartitionRoot* AlignedAllocator();
};

BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMallocUnchecked(
    const base::allocator::AllocatorDispatch*,
    size_t size,
    void* context);

BASE_EXPORT void* PartitionCalloc(const base::allocator::AllocatorDispatch*,
                                  size_t n,
                                  size_t size,
                                  void* context);

BASE_EXPORT void* PartitionMemalign(const base::allocator::AllocatorDispatch*,
                                    size_t alignment,
                                    size_t size,
                                    void* context);

BASE_EXPORT void* PartitionAlignedAlloc(
    const base::allocator::AllocatorDispatch* dispatch,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionAlignedRealloc(
    const base::allocator::AllocatorDispatch* dispatch,
    void* address,
    size_t size,
    size_t alignment,
    void* context);

BASE_EXPORT void* PartitionRealloc(const base::allocator::AllocatorDispatch*,
                                   void* address,
                                   size_t size,
                                   void* context);

BASE_EXPORT void PartitionFree(const base::allocator::AllocatorDispatch*,
                               void* address,
                               void* context);

BASE_EXPORT size_t
PartitionGetSizeEstimate(const base::allocator::AllocatorDispatch*,
                         void* address,
                         void* context);

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_PARTITION_ALLOC_H_
@ -0,0 +1,89 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"
#include "base/allocator/allocator_shim_internals.h"

#include "third_party/tcmalloc/chromium/src/config.h"
#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"

namespace {

using base::allocator::AllocatorDispatch;

void* TCMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return tc_malloc(size);
}

void* TCMallocUnchecked(const AllocatorDispatch*, size_t size, void* context) {
  return tc_malloc_skip_new_handler(size);
}

void* TCCalloc(const AllocatorDispatch*,
               size_t n,
               size_t size,
               void* context) {
  return tc_calloc(n, size);
}

void* TCMemalign(const AllocatorDispatch*,
                 size_t alignment,
                 size_t size,
                 void* context) {
  return tc_memalign(alignment, size);
}

void* TCRealloc(const AllocatorDispatch*,
                void* address,
                size_t size,
                void* context) {
  return tc_realloc(address, size);
}

void TCFree(const AllocatorDispatch*, void* address, void* context) {
  tc_free(address);
}

size_t TCGetSizeEstimate(const AllocatorDispatch*,
                         void* address,
                         void* context) {
  return tc_malloc_size(address);
}

}  // namespace

const AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &TCMalloc,          /* alloc_function */
    &TCMallocUnchecked, /* alloc_unchecked_function */
    &TCCalloc,          /* alloc_zero_initialized_function */
    &TCMemalign,        /* alloc_aligned_function */
    &TCRealloc,         /* realloc_function */
    &TCFree,            /* free_function */
    &TCGetSizeEstimate, /* get_size_estimate_function */
    nullptr,            /* batch_malloc_function */
    nullptr,            /* batch_free_function */
    nullptr,            /* free_definite_size_function */
    nullptr,            /* aligned_malloc_function */
    nullptr,            /* aligned_realloc_function */
    nullptr,            /* aligned_free_function */
    nullptr,            /* next */
};

// In the case of tcmalloc we also have to route the diagnostic symbols,
// which are not part of the unified shim layer, to tcmalloc for consistency.

extern "C" {

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {
  return tc_malloc_stats();
}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return tc_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  return tc_mallinfo();
}
#endif

}  // extern "C"
106
src/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
Normal file
@ -0,0 +1,106 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <ostream>

#include "base/allocator/winheap_stubs_win.h"
#include "base/check.h"

namespace {

using base::allocator::AllocatorDispatch;

void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
                               size_t size,
                               void* context) {
  return base::allocator::WinHeapMalloc(size);
}

void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
                               size_t n,
                               size_t elem_size,
                               void* context) {
  // Overflow check.
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n)
    return nullptr;

  void* result = DefaultWinHeapMallocImpl(self, size, context);
  if (result) {
    memset(result, 0, size);
  }
  return result;
}

void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
                                 size_t alignment,
                                 size_t size,
                                 void* context) {
  CHECK(false) << "The windows heap does not support memalign.";
  return nullptr;
}

void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                void* context) {
  return base::allocator::WinHeapRealloc(address, size);
}

void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
                            void* address,
                            void* context) {
  base::allocator::WinHeapFree(address);
}

size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
                                         void* address,
                                         void* context) {
  return base::allocator::WinHeapGetSizeEstimate(address);
}

void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
                                      size_t size,
                                      size_t alignment,
                                      void* context) {
  return base::allocator::WinHeapAlignedMalloc(size, alignment);
}

void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
                                       void* ptr,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
}

void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
                                   void* ptr,
                                   void* context) {
  base::allocator::WinHeapAlignedFree(ptr);
}

}  // namespace

// Guarantee that default_dispatch is compile-time initialized to avoid using
// it before initialization (allocations before main in release builds with
// optimizations disabled).
constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &DefaultWinHeapMallocImpl,
    &DefaultWinHeapMallocImpl, /* alloc_unchecked_function */
    &DefaultWinHeapCallocImpl,
    &DefaultWinHeapMemalignImpl,
    &DefaultWinHeapReallocImpl,
    &DefaultWinHeapFreeImpl,
    &DefaultWinHeapGetSizeEstimateImpl,
    nullptr, /* batch_malloc_function */
    nullptr, /* batch_free_function */
    nullptr, /* free_definite_size_function */
    &DefaultWinHeapAlignedMallocImpl,
    &DefaultWinHeapAlignedReallocImpl,
    &DefaultWinHeapAlignedFreeImpl,
    nullptr, /* next */
};
53
src/base/allocator/allocator_shim_internals.h
Normal file
@ -0,0 +1,53 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_

#include "build/build_config.h"

#if defined(__GNUC__)

#if defined(OS_POSIX)
#include <sys/cdefs.h>  // for __THROW
#endif

#ifndef __THROW   // Not a glibc system
#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
#define __THROW _NOEXCEPT
#else
#define __THROW
#endif  // !_NOEXCEPT
#endif

// Shim layer symbols need to be ALWAYS exported, regardless of component
// build.
//
// If an exported symbol is linked into a DSO, it may be preempted by a
// definition in the main executable. If this happens to an allocator symbol,
// it will mean that the DSO will use the main executable's allocator. This is
// normally relatively harmless -- regular allocations should all use the same
// allocator, but if the DSO tries to hook the allocator it will not see any
// allocations.
//
// However, if LLVM LTO is enabled, the compiler may inline the shim layer
// symbols into callers. The end result is that allocator calls in DSOs may use
// either the main executable's allocator or the DSO's allocator, depending on
// whether the call was inlined. This is arguably a bug in LLVM caused by its
// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
// To work around the bug we use noinline to prevent the symbols from being
// inlined.
//
// In the long run we probably want to avoid linking the allocator bits into
// DSOs altogether. This will save a little space and stop giving DSOs the
// false impression that they can hook the allocator.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
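// To illustrate how these macros are used by the override headers (a sketch;
// ShimMalloc is the shim entry point defined in allocator_shim.cc, and the
// actual overrides live in allocator_shim_override_libc_symbols.h):
//
//   SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
//     return ShimMalloc(size, nullptr);
//   }
//
// SHIM_ALWAYS_EXPORT keeps the symbol visible and out-of-line so that it
// reliably interposes the libc definition, and __THROW matches the exception
// specification of the glibc declaration it replaces.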

#elif defined(OS_WIN)  // __GNUC__

#define __THROW
#define SHIM_ALWAYS_EXPORT __declspec(noinline)

#endif  // __GNUC__

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
152
src/base/allocator/allocator_shim_override_cpp_symbols.h
Normal file
@ -0,0 +1,152 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_

// Preempt the default new/delete C++ symbols so they call the shim entry
// points. This file is strongly inspired by tcmalloc's
// libc_override_redefine.h.

#include <new>

#include "base/allocator/allocator_shim_internals.h"
#include "build/build_config.h"

// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway to prevent a possible situation where a library gets loaded
// in that uses the aligned operators. We want to avoid a situation where
// separate heaps are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if defined(OS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif
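// For reference (an illustrative note, not part of the imported file): the
// mangled names above are the Itanium C++ ABI encodings of the aligned
// operators on LP64 platforms, which can be verified with c++filt, e.g.
//
//   $ c++filt _ZnwmSt11align_val_t
//   operator new(unsigned long, std::align_val_t)
//
//   $ c++filt _ZdlPvSt11align_val_t
//   operator delete(void*, std::align_val_t)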

SHIM_ALWAYS_EXPORT void* operator new(size_t size) {
  return ShimCppNew(size);
}

SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size) {
  return ShimCppNew(size);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p,
                                          const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void operator delete(void* p, size_t) __THROW {
  ShimCppDelete(p);
}

SHIM_ALWAYS_EXPORT void operator delete[](void* p, size_t) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW(std::size_t size,
                                                 ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL(void* p, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_SIZED(void* p,
                                                      std::size_t size,
                                                      ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_ARR(std::size_t size,
                                                     ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_ARR(void* p,
                                                    ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void ALIGN_DEL_ARR_SIZED(void* p,
                                                          std::size_t size,
                                                          ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}

ALIGN_LINKAGE SHIM_ALWAYS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
119
src/base/allocator/allocator_shim_override_glibc_weak_symbols.h
Normal file
@ -0,0 +1,119 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_

// Alias the internal Glibc symbols to the shim entry points.
// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
// Effectively this file does two things:
// 1) Re-define the __malloc_hook & co symbols. Those symbols are defined as
//    weak in glibc and are meant to be defined strongly by client processes
//    to hook calls initiated from within glibc.
// 2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
//    is that in the past (in RedHat 9) we had instances of libraries that were
//    allocating via malloc() and freeing using __libc_free().
//    See tcmalloc's libc_override_glibc.h for more context.

#include <features.h>  // for __GLIBC__
#include <malloc.h>
#include <unistd.h>

#include <new>

#include "base/allocator/allocator_shim_internals.h"

// __MALLOC_HOOK_VOLATILE not defined in all Glibc headers.
#if !defined(__MALLOC_HOOK_VOLATILE)
#define MALLOC_HOOK_MAYBE_VOLATILE /**/
#else
#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
#endif

extern "C" {

// 1) Re-define malloc_hook weak symbols.
namespace {

void* GlibcMallocHook(size_t size, const void* caller) {
  return ShimMalloc(size, nullptr);
}

void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
  return ShimRealloc(ptr, size, nullptr);
}

void GlibcFreeHook(void* ptr, const void* caller) {
  return ShimFree(ptr, nullptr);
}

void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
  return ShimMemalign(align, size, nullptr);
}

}  // namespace

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
                                               const void*) = &GlibcMallocHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
    &GlibcReallocHook;

__attribute__((visibility("default"))) void (
    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
                                             const void*) = &GlibcFreeHook;

__attribute__((visibility("default"))) void* (
    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
    &GlibcMemalignHook;

// 2) Redefine libc symbols themselves.

SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
  return ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
  return ShimPosixMemalign(r, a, s);
}

}  // extern "C"

// Safety check.
#if !defined(__GLIBC__)
#error The target platform does not seem to use Glibc. Disable the allocator \
  shim by setting use_allocator_shim=false in GN args.
#endif
77
src/base/allocator/allocator_shim_override_libc_symbols.h
Normal file
@ -0,0 +1,77 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header's purpose is to preempt the Libc symbols for malloc/new so they
// call the shim layer entry points.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_

#if defined(OS_APPLE)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif

#include "base/allocator/allocator_shim_internals.h"

extern "C" {

SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
  return ShimRealloc(ptr, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* aligned_alloc(size_t align, size_t s) __THROW {
  return ShimMemalign(align, s, nullptr);
}

SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
  return ShimValloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
  return ShimPosixMemalign(r, a, s);
}

SHIM_ALWAYS_EXPORT size_t malloc_size(const void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}

SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
  return ShimGetSizeEstimate(address, nullptr);
}

// The default dispatch translation unit has to define also the following
// symbols (unless they are ultimately routed to the system symbols):
//   void malloc_stats(void);
//   int mallopt(int, int);
//   struct mallinfo mallinfo(void);

}  // extern "C"
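// Illustrative sketch (an assumption, not part of the imported file): trivial
// definitions of the extra symbols mentioned in the note above, as a default
// dispatch translation unit might provide them when it does not route them to
// the system implementations.

extern "C" void malloc_stats(void) {}

extern "C" int mallopt(int, int) {
  return 1;  // Glibc's mallopt() returns 1 on success; this stub accepts all.
}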
147
src/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
Normal file
@ -0,0 +1,147 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_

// This header overrides the __wrap_X symbols when using the link-time
// -Wl,-wrap,malloc shim-layer approach (see README.md).
// All references to malloc, free, etc. within the linker unit that gets the
// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
// references to __wrap_malloc, __wrap_free, which are defined here.
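// Illustrative sketch (not part of the imported file): the linker unit to be
// wrapped is built with flags along the lines of
//
//   ldflags = [ "-Wl,-wrap,malloc", "-Wl,-wrap,free" ]
//
// after which every reference to malloc() inside that unit binds to
// __wrap_malloc(), while the original definition stays reachable through the
// linker-provided __real_malloc(). A pass-through wrapper would look like:
//
//   extern "C" void* __real_malloc(size_t);
//   extern "C" void* __wrap_malloc(size_t size) {
//     return __real_malloc(size);  // or route into the shim, as done below
//   }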

#include <algorithm>
#include <cstring>

#include "base/allocator/allocator_shim_internals.h"

extern "C" {

SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
  ShimFree(ptr, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
  return ShimMemalign(align, size, nullptr);
}

SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
                                             size_t align,
                                             size_t size) {
  return ShimPosixMemalign(res, align, size);
}

SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
  return ShimPvalloc(size);
}

SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
  return ShimRealloc(address, size, nullptr);
}

SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
  return ShimValloc(size, nullptr);
}

const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");

extern char* __wrap_strdup(const char* str);

// Override <stdlib.h>

extern char* __real_realpath(const char* path, char* resolved_path);

SHIM_ALWAYS_EXPORT char* __wrap_realpath(const char* path,
                                         char* resolved_path) {
  if (resolved_path)
    return __real_realpath(path, resolved_path);

  char buffer[kPathMaxSize];
  if (!__real_realpath(path, buffer))
    return nullptr;
  return __wrap_strdup(buffer);
}

// Override <string.h> functions

SHIM_ALWAYS_EXPORT char* __wrap_strdup(const char* str) {
  std::size_t length = std::strlen(str) + 1;
  void* buffer = ShimMalloc(length, nullptr);
  if (!buffer)
    return nullptr;
  return reinterpret_cast<char*>(std::memcpy(buffer, str, length));
}

SHIM_ALWAYS_EXPORT char* __wrap_strndup(const char* str, size_t n) {
  std::size_t length = std::min(std::strlen(str), n);
  char* buffer = reinterpret_cast<char*>(ShimMalloc(length + 1, nullptr));
  if (!buffer)
    return nullptr;
  std::memcpy(buffer, str, length);
  buffer[length] = '\0';
  return buffer;
}

// Override <unistd.h>

extern char* __real_getcwd(char* buffer, size_t size);

SHIM_ALWAYS_EXPORT char* __wrap_getcwd(char* buffer, size_t size) {
  if (buffer)
    return __real_getcwd(buffer, size);

  if (!size)
    size = kPathMaxSize;
  char local_buffer[size];
  if (!__real_getcwd(local_buffer, size))
    return nullptr;
  return __wrap_strdup(local_buffer);
}

// Override stdio.h

// This is non-standard (_GNU_SOURCE only), but implemented by Bionic on
// Android, and used by libc++.
SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
                                        const char* fmt,
                                        va_list va_args) {
  constexpr int kInitialSize = 128;
  *strp = static_cast<char*>(
      malloc(kInitialSize));  // Our malloc() doesn't return nullptr.

  int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
  *strp = static_cast<char*>(realloc(*strp, actual_size + 1));

  // Now we know the size. This is not very efficient, but we cannot really do
  // better without accessing internal libc functions, or reimplementing
  // *printf().
  //
  // This is very lightly used in Chromium in practice, see crbug.com/116558
  // for details.
  if (actual_size >= kInitialSize)
    return vsnprintf(*strp, actual_size + 1, fmt, va_args);

  return actual_size;
}

SHIM_ALWAYS_EXPORT int __wrap_asprintf(char** strp, const char* fmt, ...) {
  va_list va_args;
  va_start(va_args, fmt);
  int retval = vasprintf(strp, fmt, va_args);
  va_end(va_args);
  return retval;
}

}  // extern "C"
286
src/base/allocator/allocator_shim_override_mac_default_zone.h
Normal file
@ -0,0 +1,286 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_

#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#error This header must be included iff PartitionAlloc-Everywhere is enabled.
#endif

#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"

namespace base {

// Defined in base/allocator/partition_allocator/partition_root.cc
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

namespace allocator {

namespace {

// malloc_introspection_t's callback functions for our own zone

kern_return_t MallocIntrospectionEnumerator(task_t task,
                                            void*,
                                            unsigned type_mask,
                                            vm_address_t zone_address,
                                            memory_reader_t reader,
                                            vm_range_recorder_t recorder) {
  // Should enumerate all memory regions allocated by this allocator, but not
  // implemented because there is no use case for now.
  return KERN_FAILURE;
}

size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
  return base::bits::AlignUp(size, base::kAlignment);
}

boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {
  // Should check the consistency of the allocator implementing this malloc
  // zone, but not implemented because there is no use case for now.
  return true;
}

void MallocIntrospectionPrint(malloc_zone_t* zone, boolean_t verbose) {
  // Should print the current state of the zone for debugging / investigation
  // purposes, but not implemented because there is no use case for now.
}

void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {
  // Should enable logging of the activities on the given `address`, but not
  // implemented because there is no use case for now.
}

void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  PartitionAllocMallocHookOnBeforeForkInParent();
}

void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  PartitionAllocMallocHookOnAfterForkInParent();
}

void MallocIntrospectionStatistics(malloc_zone_t* zone,
                                   malloc_statistics_t* stats) {
  // Should report the memory usage correctly, but not implemented because
  // there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

boolean_t MallocIntrospectionZoneLocked(malloc_zone_t* zone) {
  // Should return true if the underlying PartitionRoot is locked, but not
  // implemented because this function does not seem to be used effectively.
  return false;
}

boolean_t MallocIntrospectionEnableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
  return false;
}

void MallocIntrospectionDisableDischargeChecking(malloc_zone_t* zone) {
  // 'discharge' is not supported.
}

void MallocIntrospectionDischarge(malloc_zone_t* zone, void* memory) {
  // 'discharge' is not supported.
}

void MallocIntrospectionEnumerateDischargedPointers(
    malloc_zone_t* zone,
    void (^report_discharged)(void* memory, void* info)) {
  // 'discharge' is not supported.
}

void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in a child process after fork(2) to re-initialize the lock.
  PartitionAllocMallocHookOnAfterForkInChild();
}

void MallocIntrospectionPrintTask(task_t task,
                                  unsigned level,
                                  vm_address_t zone_address,
                                  memory_reader_t reader,
                                  print_task_printer_t printer) {
  // Should print the current state of another process's zone for debugging /
  // investigation purposes, but not implemented because there is no use case
  // for now.
}

void MallocIntrospectionTaskStatistics(task_t task,
                                       vm_address_t zone_address,
                                       memory_reader_t reader,
                                       malloc_statistics_t* stats) {
  // Should report the memory usage in another process's zone, but not
  // implemented because there is no use case for now.
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;  // High water mark of touched memory
  stats->size_allocated = 0;   // Reserved in memory
}

// malloc_zone_t's callback functions for our own zone

size_t MallocZoneSize(malloc_zone_t* zone, const void* ptr) {
  return ShimGetSizeEstimate(ptr, nullptr);
}

void* MallocZoneMalloc(malloc_zone_t* zone, size_t size) {
  return ShimMalloc(size, nullptr);
}

void* MallocZoneCalloc(malloc_zone_t* zone, size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

void* MallocZoneValloc(malloc_zone_t* zone, size_t size) {
  return ShimValloc(size, nullptr);
}

void MallocZoneFree(malloc_zone_t* zone, void* ptr) {
  return ShimFree(ptr, nullptr);
}

void* MallocZoneRealloc(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

void MallocZoneDestroy(malloc_zone_t* zone) {
  // No support to destroy the zone for now.
}

void* MallocZoneMemalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  return ShimMemalign(alignment, size, nullptr);
}

void MallocZoneFreeDefiniteSize(malloc_zone_t* zone, void* ptr, size_t size) {
  return ShimFree(ptr, nullptr);
}

malloc_introspection_t g_mac_malloc_introspection{};
malloc_zone_t g_mac_malloc_zone{};

// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible use our
// own memory allocator (and reduce bugs caused by mixed use of the system
// allocator and our own allocator), run the following function
// `InitializeDefaultAllocatorPartitionRoot` with the highest priority.
//
// Note that, despite the highest priority of the initialization order,
// [NSThread init] runs before InitializeDefaultMallocZoneWithPartitionAlloc
// unfortunately and allocates memory with the system allocator. Plus, the
// allocated memory will be deallocated with the default zone's `free` at that
// moment without using a zone dispatcher. Hence, our own `free` function
// receives an address allocated by the system allocator.
__attribute__((constructor(0))) void
InitializeDefaultMallocZoneWithPartitionAlloc() {
  // Instantiate the existing regular and purgeable zones in order to make the
  // existing purgeable zone use the existing regular zone since PartitionAlloc
  // doesn't support a purgeable zone.
  ignore_result(malloc_default_zone());
  ignore_result(malloc_default_purgeable_zone());

  // Initialize the default allocator's PartitionRoot with the existing zone.
  InitializeDefaultAllocatorPartitionRoot();

  // Create our own malloc zone.
  g_mac_malloc_introspection.enumerator = MallocIntrospectionEnumerator;
  g_mac_malloc_introspection.good_size = MallocIntrospectionGoodSize;
  g_mac_malloc_introspection.check = MallocIntrospectionCheck;
  g_mac_malloc_introspection.print = MallocIntrospectionPrint;
  g_mac_malloc_introspection.log = MallocIntrospectionLog;
  g_mac_malloc_introspection.force_lock = MallocIntrospectionForceLock;
  g_mac_malloc_introspection.force_unlock = MallocIntrospectionForceUnlock;
  g_mac_malloc_introspection.statistics = MallocIntrospectionStatistics;
  g_mac_malloc_introspection.zone_locked = MallocIntrospectionZoneLocked;
  g_mac_malloc_introspection.enable_discharge_checking =
      MallocIntrospectionEnableDischargeChecking;
  g_mac_malloc_introspection.disable_discharge_checking =
      MallocIntrospectionDisableDischargeChecking;
  g_mac_malloc_introspection.discharge = MallocIntrospectionDischarge;
  g_mac_malloc_introspection.enumerate_discharged_pointers =
      MallocIntrospectionEnumerateDischargedPointers;
  g_mac_malloc_introspection.reinit_lock = MallocIntrospectionReinitLock;
  g_mac_malloc_introspection.print_task = MallocIntrospectionPrintTask;
  g_mac_malloc_introspection.task_statistics =
      MallocIntrospectionTaskStatistics;
  // The `version` member indicates which APIs are supported in this zone.
  //   version >= 5: memalign is supported
  //   version >= 6: free_definite_size is supported
  //   version >= 7: introspect's discharge family is supported
  //   version >= 8: pressure_relief is supported
  //   version >= 9: introspect.reinit_lock is supported
  //   version >= 10: claimed_address is supported
  //   version >= 11: introspect.print_task is supported
  //   version >= 12: introspect.task_statistics is supported
  g_mac_malloc_zone.version = 9;
  g_mac_malloc_zone.zone_name = "PartitionAlloc";
  g_mac_malloc_zone.introspect = &g_mac_malloc_introspection;
  g_mac_malloc_zone.size = MallocZoneSize;
  g_mac_malloc_zone.malloc = MallocZoneMalloc;
  g_mac_malloc_zone.calloc = MallocZoneCalloc;
  g_mac_malloc_zone.valloc = MallocZoneValloc;
  g_mac_malloc_zone.free = MallocZoneFree;
  g_mac_malloc_zone.realloc = MallocZoneRealloc;
  g_mac_malloc_zone.destroy = MallocZoneDestroy;
  g_mac_malloc_zone.batch_malloc = nullptr;
  g_mac_malloc_zone.batch_free = nullptr;
  g_mac_malloc_zone.memalign = MallocZoneMemalign;
  g_mac_malloc_zone.free_definite_size = MallocZoneFreeDefiniteSize;
  g_mac_malloc_zone.pressure_relief = nullptr;
  g_mac_malloc_zone.claimed_address = nullptr;

  // Make our own zone the default zone.
  vm_address_t* zones = nullptr;
  unsigned int zone_count = 0;
  kern_return_t result =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  malloc_zone_t* system_default_zone =
      reinterpret_cast<malloc_zone_t*>(zones[0]);
  // Between malloc_zone_unregister(system_default_zone) and
  // malloc_zone_register(system_default_zone), i.e. while system_default_zone
  // is absent, it's possible that another thread calls free(ptr) and hits a
  // "no zone found" error. In order to avoid this case, temporarily register
  // a copy of system_default_zone.
  malloc_zone_t system_default_zone_copy = *system_default_zone;
  // While sizeof(malloc_zone_t) is determined at compile time,
  // system_default_zone (of runtime) may support more APIs with a larger
  // malloc_zone_t. So, limit the number of supported APIs down to the
  // compile-time known ones.
  if (system_default_zone_copy.version > g_mac_malloc_zone.version)
    system_default_zone_copy.version = g_mac_malloc_zone.version;
  malloc_zone_register(&system_default_zone_copy);
  // Put our own zone at the last position, so that it is promoted to the
  // default zone. The implementation logic of malloc_zone_unregister is:
  //   zone_table.swap(unregistered_zone, last_zone);
  //   zone_table.shrink_size_by_1();
  malloc_zone_register(&g_mac_malloc_zone);
  malloc_zone_unregister(system_default_zone);
  malloc_zone_register(system_default_zone);
  malloc_zone_unregister(&system_default_zone_copy);

  // Confirm that our own zone is now the default zone.
  zones = nullptr;
  zone_count = 0;
  result = malloc_get_all_zones(mach_task_self(), nullptr, &zones, &zone_count);
  MACH_CHECK(result == KERN_SUCCESS, result) << "malloc_get_all_zones";
  system_default_zone = reinterpret_cast<malloc_zone_t*>(zones[0]);
  CHECK_EQ(system_default_zone, &g_mac_malloc_zone);
}

}  // namespace

}  // namespace allocator
}  // namespace base
60
src/base/allocator/allocator_shim_override_mac_symbols.h
Normal file
@ -0,0 +1,60 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
#error This header is meant to be included only once by allocator_shim.cc
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_

#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
  MallocZoneFunctions new_functions;
  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
    return ShimGetSizeEstimate(ptr, zone);
  };
  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimMalloc(size, zone);
  };
  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
                            size_t size) -> void* {
    return ShimCalloc(n, size, zone);
  };
  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
    return ShimValloc(size, zone);
  };
  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
    ShimFree(ptr, zone);
  };
  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
                             size_t size) -> void* {
    return ShimRealloc(ptr, size, zone);
  };
  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
                                  void** results,
                                  unsigned num_requested) -> unsigned {
    return ShimBatchMalloc(size, results, num_requested, zone);
  };
  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
                                unsigned num_to_be_freed) -> void {
    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
  };
  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
                              size_t size) -> void* {
    return ShimMemalign(alignment, size, zone);
  };
  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
                                        size_t size) {
    ShimFreeDefiniteSize(ptr, size, zone);
  };
  return new_functions;
}

}  // namespace allocator
}  // namespace base
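// Illustrative sketch (hypothetical, not part of the imported file): the
// function above only builds the replacement table; a caller elsewhere in the
// shim machinery would apply it to a previously stored zone, along these
// lines. InstallFunctionsIntoZone is a made-up name for illustration only.
//
//   MallocZoneFunctions shim_functions =
//       base::allocator::MallocZoneFunctionsToReplaceDefault();
//   InstallFunctionsIntoZone(default_zone, shim_functions);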
178
src/base/allocator/allocator_shim_override_ucrt_symbols_win.h
Normal file
@ -0,0 +1,178 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header defines symbols to override the same functions in the Visual C++
// CRT implementation.

#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_

#include <malloc.h>

#include <windows.h>

#include "base/allocator/allocator_shim_internals.h"

// Even though most C++ allocation operators can be left alone since the
// interception works at a lower level, these should be overridden. Otherwise
// they redirect to malloc(), which is configured to crash with an OOM in
// failure cases, such as allocation requests that are too large.
SHIM_ALWAYS_EXPORT void* operator new(size_t size,
                                      const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
                                        const std::nothrow_t&) __THROW {
  return ShimCppNewNoThrow(size);
}

extern "C" {

void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;

namespace {

int win_new_mode = 0;

}  // namespace

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
//
// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
int _set_new_mode(int flag) {
  // The MS CRT calls this function early on in startup, so this serves as a
  // low-overhead proof that the allocator shim is in place for this process.
  base::allocator::g_is_win_shim_layer_initialized = true;
  int old_mode = win_new_mode;
  win_new_mode = flag;

  base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);

  return old_mode;
}

// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
int _query_new_mode() {
  return win_new_mode;
}
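// Illustrative sketch (not part of the imported file): what a CRT client's
// use of the mode switch looks like. With mode 1, a failing malloc() invokes
// the installed new handler instead of simply returning nullptr.
//
//   _set_new_mode(1);                  // malloc() failures now call the handler
//   int previous = _query_new_mode();  // previous == 1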

// These symbols override the CRT's implementation of the same functions.
__declspec(restrict) void* malloc(size_t size) {
  return ShimMalloc(size, nullptr);
}

void free(void* ptr) {
  ShimFree(ptr, nullptr);
}

__declspec(restrict) void* realloc(void* ptr, size_t size) {
  return ShimRealloc(ptr, size, nullptr);
}

__declspec(restrict) void* calloc(size_t n, size_t size) {
  return ShimCalloc(n, size, nullptr);
}

// _msize() is the Windows equivalent of malloc_size().
size_t _msize(void* memblock) {
  return ShimGetSizeEstimate(memblock, nullptr);
}

__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
  return ShimAlignedMalloc(size, alignment, nullptr);
}

__declspec(restrict) void* _aligned_realloc(void* address,
                                            size_t size,
                                            size_t alignment) {
  return ShimAlignedRealloc(address, size, alignment, nullptr);
}

void _aligned_free(void* address) {
  ShimAlignedFree(address, nullptr);
}

// _recalloc_base is called by CRT internally.
__declspec(restrict) void* _recalloc_base(void* block,
                                          size_t count,
                                          size_t size) {
  const size_t old_block_size = (block != nullptr) ? _msize(block) : 0;
  base::CheckedNumeric<size_t> new_block_size_checked = count;
  new_block_size_checked *= size;
  const size_t new_block_size = new_block_size_checked.ValueOrDie();

  void* const new_block = realloc(block, new_block_size);

  if (new_block != nullptr && old_block_size < new_block_size) {
    memset(static_cast<char*>(new_block) + old_block_size, 0,
           new_block_size - old_block_size);
  }

  return new_block;
}

__declspec(restrict) void* _malloc_base(size_t size) {
  return malloc(size);
}

__declspec(restrict) void* _calloc_base(size_t n, size_t size) {
  return calloc(n, size);
}

void _free_base(void* block) {
  free(block);
}

__declspec(restrict) void* _recalloc(void* block, size_t count, size_t size) {
  return _recalloc_base(block, count, size);
}

// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added
// if uses are introduced.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}

}  // extern "C"

#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
23
src/base/allocator/debugallocation_shim.cc
Normal file
@ -0,0 +1,23 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Workaround for crosbug:629593. Using AFDO on the tcmalloc files is
// causing problems. The tcmalloc files depend on stack layouts and
// AFDO can mess with them. Better not to use AFDO there. This is a
// temporary hack. We will add a mechanism in the build system to
// avoid using -fauto-profile for tcmalloc files.
#include "build/chromeos_buildflags.h"

#if !defined(__clang__) && \
    (BUILDFLAG(IS_CHROMEOS_ASH) || (__GNUC__ > 5 && __GNUC__ < 7))
// Note that this option only seems to be available in the chromeos GCC 4.9
// toolchain, and stock GCC 5 up to 7.
#pragma GCC optimize ("no-auto-profile")
#endif

#if defined(TCMALLOC_FOR_DEBUGALLOCATION)
#include "third_party/tcmalloc/chromium/src/debugallocation.cc"
#else
#include "third_party/tcmalloc/chromium/src/tcmalloc.cc"
#endif
119
src/base/allocator/malloc_zone_functions_mac.cc
Normal file
@ -0,0 +1,119 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/malloc_zone_functions_mac.h"

#include <atomic>

#include "base/synchronization/lock.h"

namespace base {
namespace allocator {

MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
              "MallocZoneFunctions must be POD");

void StoreZoneFunctions(const ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  memset(functions, 0, sizeof(MallocZoneFunctions));
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.
    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }

  // Note that zone version 8 introduced a pressure relief callback, and
  // version 10 introduced a claimed address callback, but neither are
  // allocation or deallocation callbacks and so aren't important to intercept.

  functions->context = zone;
}

namespace {

// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
base::Lock& GetLock() {
  static base::Lock* g_lock = new base::Lock;
  return *g_lock;
}

void EnsureMallocZonesInitializedLocked() {
  GetLock().AssertAcquired();
}

int g_zone_count = 0;

bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
  EnsureMallocZonesInitializedLocked();
  GetLock().AssertAcquired();
  for (int i = 0; i < g_zone_count; ++i) {
    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
      return true;
  }
  return false;
}

}  // namespace

bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;

  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code that
  // reads these values is triggered after this function returns, so we want to
  // guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}

bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}

bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}

int GetMallocZoneCountForTesting() {
  base::AutoLock l(GetLock());
  return g_zone_count;
}

void ClearAllMallocZonesForTesting() {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}

}  // namespace allocator
}  // namespace base
103
src/base/allocator/malloc_zone_functions_mac.h
Normal file
@ -0,0 +1,103 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_

#include <malloc/malloc.h>
#include <stddef.h>

#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {
namespace allocator {

typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);
typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
                                      size_t size,
                                      void** results,
                                      unsigned num_requested);
typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
                                void** to_be_freed,
                                unsigned num_to_be_freed);
typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
                                        void* ptr,
                                        size_t size);
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);

struct MallocZoneFunctions {
  malloc_type malloc;
  calloc_type calloc;
  valloc_type valloc;
  free_type free;
  realloc_type realloc;
  memalign_type memalign;
  batch_malloc_type batch_malloc;
  batch_free_type batch_free;
  free_definite_size_type free_definite_size;
  size_fn_type size;
  const ChromeMallocZone* context;
};

BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
                                    MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];

// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch back
// into the zone, and additional zones may be added later in the execution of
// the program, so the array needs to be both thread-safe and high-performance.
//
// We begin by creating an array of MallocZoneFunctions of fixed size. We will
// never modify the container, which provides thread-safety to iterators. When
// we want to add a MallocZoneFunctions to the container, we:
//   1. Fill in all the fields.
//   2. Update the total zone count.
//   3. Insert a memory barrier.
//   4. Insert our shim.
//
// Each MallocZoneFunctions is uniquely identified by |context|, which is a
// pointer to the original malloc zone. When we wish to dispatch back to the
// original malloc zones, we iterate through the array, looking for a matching
// |context|.
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
//
// Returns whether the zone was successfully stored.
BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
BASE_EXPORT bool DoesMallocZoneNeedReplacing(
    ChromeMallocZone* zone,
    const MallocZoneFunctions* functions);

BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();

inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
    if (g_malloc_zones[i].context == zone)
      return g_malloc_zones[i];
  }
  IMMEDIATE_CRASH();
}

}  // namespace allocator
}  // namespace base

#endif  // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
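// Illustrative sketch (not part of the imported file): how a shim function can
// dispatch back into the original (pre-shim) zone implementation recorded by
// StoreZoneFunctions(). The function name here is made up for illustration.

#include "base/allocator/malloc_zone_functions_mac.h"

void FreeViaOriginalZone(malloc_zone_t* zone, void* ptr) {
  // Lock-free lookup: the zone's |context| identifies its stored functions.
  base::allocator::MallocZoneFunctions& functions =
      base::allocator::GetFunctionsForZone(zone);
  functions.free(zone, ptr);
}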
65
src/base/allocator/partition_allocator/DEPS
Normal file
@ -0,0 +1,65 @@
# It's planned that PartitionAlloc will be a stand-alone third party library
# and dependencies need to be strictly controlled and minimized.

noparent = True

include_rules = [
  "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
  "+base/allocator/buildflags.h",
  "+base/base_export.h",
  "+base/bind.h",
  "+base/bits.h",
  "+base/callback.h",
  "+base/check.h",
  "+base/check_op.h",
  "+base/compiler_specific.h",
  "+base/cpu.h",
  "+base/cxx17_backports.h",
  "+base/dcheck_is_on.h",
  "+base/debug/alias.h",
  "+base/debug/proc_maps_linux.h",
  "+base/feature_list.h",
  "+base/fuchsia/fuchsia_logging.h",
  "+base/gtest_prod_util.h",
  "+base/immediate_crash.h",
  "+base/lazy_instance.h",
  "+base/location.h",
  "+base/logging.h",
  "+base/logging_buildflags.h",
  "+base/mac/foundation_util.h",
  "+base/mac/mac_util.h",
  "+base/mac/scoped_cftyperef.h",
  "+base/memory/ref_counted.h",
  "+base/memory/scoped_refptr.h",
  "+base/memory/tagging.h",
  "+base/metrics/histogram_functions.h",
  "+base/no_destructor.h",
  "+base/numerics/ranges.h",
  "+base/posix/eintr_wrapper.h",
  "+base/process/memory.h",
  "+base/rand_util.h",
  "+base/sequenced_task_runner.h",
  "+base/strings/stringprintf.h",
  "+base/synchronization/lock.h",
  "+base/sys_byteorder.h",
  "+base/system/sys_info.h",
  "+base/test/bind.h",
  "+base/test/gtest_util.h",
  "+base/test/scoped_feature_list.h",
  "+base/test/task_environment.h",
  "+base/thread_annotations.h",
  "+base/threading/platform_thread.h",
  "+base/threading/thread_task_runner_handle.h",
  "+base/time/time.h",
  "+base/time/time_override.h",
  "+base/timer/lap_timer.h",
  "+base/timer/timer.h",
  "+base/trace_event/base_tracing.h",
  "+base/win/windows_types.h",
  "+base/win/windows_version.h",
  "+build/build_config.h",
  "+build/buildflag.h",
  "+build/chromecast_buildflags.h",
  "+testing/gtest/include/gtest/gtest.h",
  "+testing/perf/perf_result_reporter.h",
]
6
src/base/allocator/partition_allocator/DIR_METADATA
Normal file
@ -0,0 +1,6 @@
monorail {
  component: "Blink>MemoryAllocator>Partition"
}

# Also security-dev@chromium.org
team_email: "platform-architecture-dev@chromium.org"
4
src/base/allocator/partition_allocator/OWNERS
Normal file
@ -0,0 +1,4 @@
ajwong@chromium.org
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org
249
src/base/allocator/partition_allocator/PartitionAlloc.md
Normal file
@ -0,0 +1,249 @@
# PartitionAlloc Design

This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.

## Overview

PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.

### Core terms

A *partition* is a heap that is separated and protected from any other
partitions, as well as from non-PartitionAlloc memory. The most typical use of
partitions is to isolate certain object types. However, one can also isolate
objects of certain sizes, or objects of a certain lifetime (as the caller
prefers). Callers can create as many partitions as they need. The direct
memory cost of partitions is minimal, but the implicit cost resulting from
fragmentation is not to be underestimated.

Each partition holds multiple buckets. A *bucket* is a series of regions in a
partition that contains similar-sized objects, e.g. one bucket holds sizes
(240, 256], another (256, 288], and so on. Bucket sizes are
geometrically-spaced, and go all the way up to `kMaxBucketed=960KiB`
(so called *normal buckets*). There are 8 buckets between each power of two.
Note that buckets that aren't a multiple of `base::kAlignment` can't be used.

Larger allocations (>`kMaxBucketed`) are realized by direct memory mapping
(*direct map*).
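
As a worked example of the bucket spacing (a sketch, not an excerpt from the
implementation): with 8 buckets between each power of two, the bucket
boundaries between 256B and 512B are 32B apart.

```cpp
#include <stddef.h>
#include <stdio.h>

int main() {
  const size_t base = 256;
  const size_t step = base / 8;  // 8 buckets between each power of two.
  // Prints the bucket ranges (256, 288], (288, 320], ..., (480, 512].
  for (size_t bound = base + step; bound <= 2 * base; bound += step)
    printf("(%zu, %zu]\n", bound - step, bound);
  return 0;
}
```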
|
||||
|
||||
### Performance
|
||||
|
||||
PartitionAlloc is designed to be extremely fast in its fast paths. The fast
|
||||
paths of allocation and deallocation require very few (reasonably predictable)
|
||||
branches. The number of operations in the fast paths is minimal, leading to the
|
||||
possibility of inlining.
|
||||
|
||||
However, even the fast path isn't the fastest, because it requires taking
|
||||
a per-partition lock. Although we optimized the lock, there was still room for
|
||||
improvement. Therefore we introduced the *thread cache*, which holds a small
|
||||
amount of not-too-large memory chunks, ready to be allocated. Because these
|
||||
chunks are stored per-thread, they can be allocated without a lock, only
|
||||
requiring a faster thread-local storage (TLS) lookup, improving cache locality
|
||||
in the process.
|
||||
The thread cache has been tailored to satisfy a vast majority of requests by
|
||||
allocating from and releasing memory to the main allocator in batches,
|
||||
amortizing lock acquisition and further improving locality while not trapping
|
||||
excess memory.

### Security

Security is one of the important goals of PartitionAlloc.

PartitionAlloc guarantees that different partitions exist in different regions
of the process's address space. When the caller has freed all objects contained
in a page in a partition, PartitionAlloc returns the physical memory to the
operating system, but continues to reserve the region of address space.
PartitionAlloc will only reuse an address space region for the same partition.

Similarly, one page can contain only objects from the same bucket. When freed,
PartitionAlloc returns the physical memory, but continues to reserve the
region for this very bucket.

The above techniques help avoid type confusion attacks. Note, however, that
these apply only to normal buckets and not to direct map, as the latter would
waste too much address space.

PartitionAlloc also guarantees that:

* Linear overflows/underflows cannot corrupt into, out of, or between
  partitions. There are guard pages at the beginning and the end of each memory
  region owned by a partition.

* Linear overflows/underflows cannot corrupt the allocation metadata.
  PartitionAlloc records metadata in a dedicated, out-of-line region (not
  adjacent to objects), surrounded by guard pages. (Freelist pointers are an
  exception.)

* A partial overwrite of a freelist pointer should fault.

* Direct map allocations have guard pages at the beginning and the end.

### Alignment

PartitionAlloc guarantees that returned pointers are aligned on a
`base::kAlignment` boundary (typically 16B on 64-bit systems, and 8B on 32-bit
ones).

PartitionAlloc also supports higher levels of alignment, which can be requested
via `PartitionAlloc::AlignedAllocFlags()` or platform-specific APIs (such as
`posix_memalign()`). The requested alignment has to be a power of two.
PartitionAlloc reserves the right to round up the requested size to the
nearest power of two greater than or equal to the requested alignment. This
may be wasteful, but allows taking advantage of natural PartitionAlloc
alignment guarantees. Allocations with an alignment requirement greater than
`base::kAlignment` are expected to be very rare.
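
For example, an aligned request via the `posix_memalign()` API mentioned above
looks like this; a sketch of a conforming call (POSIX additionally requires the
alignment to be a multiple of `sizeof(void*)`):

```cpp
#include <cstdlib>

int main() {
  void* ptr = nullptr;
  // Request 1000 bytes aligned to a 64-byte boundary. Per the paragraph
  // above, the allocator may round the size up (here possibly to 1024).
  if (posix_memalign(&ptr, /*alignment=*/64, /*size=*/1000) != 0)
    return 1;
  std::free(ptr);
  return 0;
}
```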

## PartitionAlloc-Everywhere

Originally, PartitionAlloc was used only in Blink (Chromium’s rendering
engine). It was invoked explicitly, by calling PartitionAlloc APIs directly.

PartitionAlloc-Everywhere is the name of the project that brought
PartitionAlloc to the entire-ish codebase (exclusions apply). This was done by
intercepting `malloc()`, `free()`, `realloc()`, the aforementioned
`posix_memalign()`, etc., and routing them into PartitionAlloc. The shim
located in `base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h`
is responsible for the interception. For more details, see
[base/allocator/README.md](../../../base/allocator/README.md).

A special, catch-all *Malloc* partition has been created for the intercepted
`malloc()` et al., in order to isolate it from the already existing Blink
partitions. The only exception is Blink's *FastMalloc* partition, which was
also catch-all in nature, so it's perfectly fine to merge the two and thereby
minimize fragmentation.

PartitionAlloc-Everywhere was launched in M89 for Windows 64-bit and Android.
Windows 32-bit and Linux followed shortly after, in M90.

## Architecture

### Many Different Flavors of Pages

In PartitionAlloc, by *system page* we mean a memory page as defined by the
CPU/OS (often referred to elsewhere as a "virtual page"). It is most commonly
4KiB in size, but depending on the CPU it can be larger (PartitionAlloc
supports up to 64KiB).

We use the term "system page" to disambiguate it from *partition page*, which
is the most common granularity used by PartitionAlloc. Each partition page
consists of exactly 4 system pages.

A *super page* is a 2MiB region, aligned on a 2MiB boundary. Don't confuse it
with CPU/OS terms like "large page" or "huge page", which are also commonly
2MiB in size. Those have to be fully committed/uncommitted in memory, whereas
super pages can be partially committed, with system page granularity.

### Slots and Spans

A *slot* is an indivisible allocation unit. Slot sizes are tied to buckets.
For example, each allocation that falls into the bucket (240, 256] would be
satisfied with a slot of size 256. This applies only to normal buckets, not
to direct map.

A *slot span* is just a grouping of slots of the same size next to each other
in memory. Slot span size is a multiple of a partition page.

A bucket is a collection of slot spans containing slots of the same size,
organized as linked lists.

Allocations up to 4 partition pages are referred to as *small buckets*.
In these cases, slot spans are always between 1 and 4 partition pages in size.
The size is chosen based on the slot size, such that the rounding waste is
minimized. For example, if the slot size were 96B and the slot span were 1
partition page of 16KiB, 64B would be wasted at the end, whereas nothing is
wasted if 3 partition pages totalling 48KiB are used. Furthermore,
PartitionAlloc may avoid waste by committing fewer system pages than it
reserves. For example, for a slot size of 80B we'd use a slot span of 4
partition pages of 16KiB, i.e. 16 system pages of 4KiB, but commit only up to
15 of them, thus resulting in perfect packing.
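
A small sketch of that trade-off: for a given slot size, try spans of 1-4
partition pages and report the rounding waste at the end of each candidate
(16KiB partition pages assumed, as in the example above):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  constexpr size_t kPartitionPage = 16 * 1024;  // 16KiB, as in the text.
  for (size_t slot : {size_t{96}, size_t{80}}) {
    for (size_t pages = 1; pages <= 4; ++pages) {
      const size_t span = pages * kPartitionPage;
      // Bytes left over after packing whole slots; 96B -> 0 at 3 pages.
      // (The 80B case additionally relies on committing only 15 of 16
      // system pages, which this sketch doesn't model.)
      std::printf("slot=%zuB pages=%zu waste=%zuB\n", slot, pages,
                  span % slot);
    }
  }
  return 0;
}
```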

Allocations above 4 partition pages (but ≤`kMaxBucketed`) are referred to as
*single slot spans*, because each such slot span is guaranteed to hold exactly
one slot. Fun fact: there are sizes ≤4 partition pages that result in a slot
span having exactly 1 slot, but they're nonetheless still classified as small
buckets. The reason is that single slot spans are often handled by a different
code path, and that distinction is made purely based on slot size, for
simplicity and efficiency.

### Layout in Memory

PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super
pages. Each super page is split into partition pages. The first and the last
partition pages are permanently inaccessible and serve as guard pages, with
the exception of one system page in the middle of the first partition page
that holds metadata (a 32B struct per partition page).
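
Since every super page is 2MiB-aligned, the allocator can get from any object
address to the owning super page, and from there to the metadata, with plain
masking and a fixed offset. A minimal sketch of the derivation (the constants
and the exact metadata offset are simplified here for illustration):

```cpp
#include <cstdint>

constexpr uintptr_t kSuperPageSize = 2 * 1024 * 1024;  // 2MiB, 2MiB-aligned.
constexpr uintptr_t kSuperPageBaseMask = ~(kSuperPageSize - 1);
constexpr uintptr_t kSystemPageSize = 4 * 1024;

// Masking any object address yields its super page base...
constexpr uintptr_t SuperPageBase(uintptr_t address) {
  return address & kSuperPageBaseMask;
}

// ...and the metadata system page sits at a fixed offset from that base
// (one system page into the first partition page, simplified here).
constexpr uintptr_t MetadataPage(uintptr_t address) {
  return SuperPageBase(address) + kSystemPageSize;
}

static_assert(MetadataPage(0x40012345) == 0x40001000, "mask, then offset");
```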

As allocation requests arrive, there is eventually a need to allocate a new
slot span. Address space for such a slot span is carved out from the last
super page. If there is not enough space, a new super page is allocated. Due
to the varying sizes of slot spans, this may leave some space unused (we never
go back to fill previous super pages), which is fine because this memory is
merely reserved, and reserved address space is far less precious than
committed memory. Note also that address space reserved for a slot span is
never released, even if the slot span isn't used for a long time.

All slots in a newly allocated slot span are *free*, i.e. available for
allocation.

### Freelist Pointers

All free slots within a slot span are chained into a singly-linked freelist,
by writing the *next* pointer at the beginning of each slot; the head of the
list is written in the metadata struct.

However, writing a pointer in each free slot of a newly allocated span would
require committing and faulting in physical pages upfront, which would be
unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*.
Only provisioned slots are chained into the freelist. Once the provisioned
slots in a span are depleted, another page's worth of slots is provisioned
(note that a slot which crosses a page boundary is only provisioned together
with the slots of the next page). See
`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details.

Freelist pointers are stored at the beginning of each free slot. As such, they
are the only metadata that is inline, i.e. stored among the objects. This
makes them prone to overruns. On little-endian systems, the pointers are
encoded by reversing their byte order, so that a partial overrun will very
likely destroy the pointer, as opposed to forming a valid pointer to a nearby
location.
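
A minimal sketch of that encoding (the real code also handles 32-bit and
big-endian builds, and adds the shadow described next): byte-swapping is its
own inverse, and a partially overwritten encoded value decodes to a wild,
likely-unmapped pointer:

```cpp
#include <cstdint>

// Freelist "next" pointers are stored byte-reversed (GCC/Clang intrinsic
// shown). A linear overflow that rewrites the low bytes of the stored value
// corrupts the *high* bytes of the decoded pointer, so dereferencing it
// faults instead of landing near the original allocation.
inline uint64_t TransformFreelistPointer(uint64_t ptr) {
  return __builtin_bswap64(ptr);  // Applying it twice round-trips.
}
```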

Furthermore, a shadow of each freelist pointer is stored next to it, encoded
in a different manner. This helps PartitionAlloc detect corruption.

### Slot Span States

A slot span can be in any of 4 states:

* *Full*. A full span has no free slots.
* *Empty*. An empty span has no allocated slots, only free slots.
* *Active*. An active span is anything in between the above two.
* *Decommitted*. A decommitted span is a special case of an empty span, where
  all pages are decommitted from memory.

PartitionAlloc prioritizes getting an available slot from an active span over
an empty one, in the hope that the latter can soon be transitioned into the
decommitted state, thus releasing memory. There is no mechanism, however, to
prioritize the selection of a slot span based on the number of already
allocated slots.

An empty span becomes decommitted either when there are too many empty spans
(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or
under low memory pressure conditions). An allocation can be satisfied from
a decommitted span if there are no active or empty spans available. The slot
provisioning mechanism kicks back in, committing the pages gradually as
needed, and the span becomes active. (There is currently no way to unprovision
slots other than decommitting the entire span.)

As mentioned above, a bucket is a collection of slot spans containing slots of
the same size. In fact, each bucket has 3 linked lists, chaining active, empty
and decommitted spans (see `PartitionBucket::*_slot_spans_head`). There is no
need for a full span list. The lists are updated lazily. An empty, decommitted
or full span may stay on the active list for some time, until
`PartitionBucket::SetNewActiveSlotSpan()` encounters it. A decommitted span
may stay on the empty list for some time, until
`PartitionBucket<thread_safe>::SlowPathAlloc()` encounters it. However, the
inaccuracy can't happen in the other direction, i.e. an active span can only
be on the active list, and an empty span can only be on the active or empty
list.
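
As a rough mental model of those lists (hypothetical code, not the real data
structures, which live in `PartitionBucket` and the slot span metadata):

```cpp
// Hypothetical model of a bucket's span lists and their lazy maintenance.
enum class SpanState { kActive, kFull, kEmpty, kDecommitted };

struct SlotSpan {
  SpanState state;
  SlotSpan* next;  // Intrusive singly-linked list per bucket.
};

struct Bucket {
  // Three list heads; full spans are intentionally not tracked on any list.
  SlotSpan* active_head = nullptr;
  SlotSpan* empty_head = nullptr;
  SlotSpan* decommitted_head = nullptr;
};

// Lazy maintenance: stale (non-active) spans are only popped off the active
// list when the allocator actually walks it looking for a usable span.
SlotSpan* PopUsableActiveSpan(Bucket& bucket) {
  while (bucket.active_head &&
         bucket.active_head->state != SpanState::kActive) {
    SlotSpan* stale = bucket.active_head;
    bucket.active_head = stale->next;  // Re-filing of |stale| omitted here.
  }
  return bucket.active_head;
}
```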
423
src/base/allocator/partition_allocator/address_pool_manager.cc
Normal file
@ -0,0 +1,423 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_pool_manager.h"

#if defined(OS_APPLE)
#include <sys/mman.h>
#endif

#include <algorithm>
#include <limits>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/cxx17_backports.h"
#include "base/lazy_instance.h"

namespace base {
namespace internal {

namespace {

base::LazyInstance<AddressPoolManager>::Leaky g_address_pool_manager =
    LAZY_INSTANCE_INITIALIZER;

}  // namespace

// static
AddressPoolManager* AddressPoolManager::GetInstance() {
  return g_address_pool_manager.Pointer();
}

#if defined(PA_HAS_64_BITS_POINTERS)

namespace {

// This will crash if the range cannot be decommitted.
void DecommitPages(void* address, size_t size) {
  // Callers rely on the pages being zero-initialized when recommitting them.
  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
  // particular on macOS, but |DecommitAndZeroSystemPages| does.
  DecommitAndZeroSystemPages(address, size);
}

}  // namespace

pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));

  for (pool_handle i = 0; i < base::size(pools_); ++i) {
    if (!pools_[i].IsInitialized()) {
      pools_[i].Initialize(ptr, length);
      return i + 1;
    }
  }
  PA_NOTREACHED();
  return 0;
}

void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPages>& used) {
  Pool* pool = GetPool(handle);
  if (!pool)
    return;

  pool->GetUsedSuperPages(used);
}

uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  if (!pool)
    return 0;

  return pool->GetBaseAddress();
}

void AddressPoolManager::ResetForTesting() {
  for (pool_handle i = 0; i < base::size(pools_); ++i)
    pools_[i].Reset();
}

void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}

char* AddressPoolManager::Reserve(pool_handle handle,
                                  void* requested_address,
                                  size_t length) {
  Pool* pool = GetPool(handle);
  if (!requested_address)
    return reinterpret_cast<char*>(pool->FindChunk(length));
  const bool is_available = pool->TryReserveChunk(
      reinterpret_cast<uintptr_t>(requested_address), length);
  if (is_available)
    return static_cast<char*>(requested_address);
  return reinterpret_cast<char*>(pool->FindChunk(length));
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              void* ptr,
                                              size_t length) {
  PA_DCHECK(0 < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  DecommitPages(ptr, length);
  pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
}

void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if DCHECK_IS_ON()
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPages);

  base::AutoLock scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}

bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}

void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}

void AddressPoolManager::Pool::GetUsedSuperPages(
    std::bitset<kMaxSuperPages>& used) {
  base::AutoLock scoped_lock(lock_);

  PA_DCHECK(IsInitialized());
  used = alloc_bitset_;
}

uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
  PA_DCHECK(IsInitialized());
  return address_begin_;
}

uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  base::AutoLock scoped_lock(lock_);

  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size >> kSuperPageShift;

  // Use first-fit policy to find an available chunk from free chunks. Start
  // from |bit_hint_|, because we know there are no free chunks before.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
    // |total_bits_|, return 0 to signal that no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_)
      return 0;

    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Set |found=false|
        // to ensure the outer loop continues. However, continue the inner loop
        // to set |beg_bit| just past the last set bit in the investigated
        // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
        // next outer loop pass from checking the same bits.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit)
          ++bit_hint_;
      }
    }

    // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
    // mark as allocated) and return the allocated address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if DCHECK_IS_ON()
      PA_DCHECK(address + requested_size <= address_end_);
#endif
      return address;
    }
  }

  PA_NOTREACHED();
  return 0;
}

bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  base::AutoLock scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
  const size_t need_bits = requested_size / kSuperPageSize;
  const size_t end_bit = begin_bit + need_bits;
  // Check that requested address is not too high.
  if (end_bit > total_bits_)
    return false;
  // Check if any bit of the requested region is set already.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    if (alloc_bitset_.test(i))
      return false;
  }
  // Otherwise, set the bits.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    alloc_bitset_.set(i);
  }
  return true;
}

void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  base::AutoLock scoped_lock(lock_);

  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));

  PA_DCHECK(address_begin_ <= address);
#if DCHECK_IS_ON()
  PA_DCHECK(address + free_size <= address_end_);
#endif

  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}

AddressPoolManager::Pool::Pool() = default;
AddressPoolManager::Pool::~Pool() = default;

#else  // defined(PA_HAS_64_BITS_POINTERS)

static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");

template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(!bitmap.test(i));
    bitmap.set(i);
  }
}

template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(bitmap.test(i));
    bitmap.reset(i);
  }
}

char* AddressPoolManager::Reserve(pool_handle handle,
                                  void* requested_address,
                                  size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  char* ptr = reinterpret_cast<char*>(
      AllocPages(requested_address, length, kSuperPageSize, PageInaccessible,
                 PageTag::kPartitionAlloc));
  if (UNLIKELY(!ptr))
    return nullptr;
  return ptr;
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              void* ptr,
                                              size_t length) {
  uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(ptr);
  PA_DCHECK(!(ptr_as_uintptr & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(ptr, length);
}

void AddressPoolManager::MarkUsed(pool_handle handle,
                                  const void* address,
                                  size_t length) {
  uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(address);
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used internally by the
    // allocator and is not available for callers.
    //
    // This is required to avoid crashes caused by the following code:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByBRPPool(ptr) were to return true, ~raw_ptr<T>() would
    // crash, since the memory is not allocated by PartitionAlloc.
    SetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (ptr_as_uintptr >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(handle == kNonBRPPoolHandle);
    PA_DCHECK((length %
               AddressPoolManagerBitmap::kBytesPer1BitOfNonBRPPoolBitmap) == 0);
    SetBitmap(
        AddressPoolManagerBitmap::non_brp_pool_bits_,
        ptr_as_uintptr >> AddressPoolManagerBitmap::kBitShiftOfNonBRPPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfNonBRPPoolBitmap);
  }
}

void AddressPoolManager::MarkUnused(pool_handle handle,
                                    const void* address,
                                    size_t length) {
  // Address regions allocated for normal buckets are never released, so this
  // function can only be called for direct map. However, do not DCHECK on
  // IsManagedByDirectMap(address), because many tests test this function using
  // small allocations.

  uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(address);
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByBRPPool() return false when an address inside the
    // first or the last PartitionPageSize()-bytes block is given.
    // (See the comment in MarkUsed above.)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (ptr_as_uintptr >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(handle == kNonBRPPoolHandle);
    PA_DCHECK((length %
               AddressPoolManagerBitmap::kBytesPer1BitOfNonBRPPoolBitmap) == 0);
    ResetBitmap(
        AddressPoolManagerBitmap::non_brp_pool_bits_,
        ptr_as_uintptr >> AddressPoolManagerBitmap::kBitShiftOfNonBRPPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfNonBRPPoolBitmap);
  }
}

void AddressPoolManager::ResetForTesting() {
  AutoLock guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::non_brp_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

#endif  // defined(PA_HAS_64_BITS_POINTERS)

AddressPoolManager::AddressPoolManager() = default;
AddressPoolManager::~AddressPoolManager() = default;

}  // namespace internal
}  // namespace base
147
src/base/allocator/partition_allocator/address_pool_manager.h
Normal file
@ -0,0 +1,147 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

#include <bitset>
#include <limits>

#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"

namespace base {

template <typename Type>
struct LazyInstanceTraitsBase;

namespace internal {

// (64-bit version)
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 2 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32-bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAllocBRPPool and
// IsManagedByPartitionAllocNonBRPPool use the bitmaps to judge whether a given
// address is in a pool that supports BackupRefPtr or in a pool that doesn't.
// All PartitionAlloc allocations must be in either of the pools.
class BASE_EXPORT AddressPoolManager {
 public:
  static AddressPoolManager* GetInstance();

#if defined(PA_HAS_64_BITS_POINTERS)
  pool_handle Add(uintptr_t address, size_t length);
  void Remove(pool_handle handle);

  // Populate a |used| bitset of superpages currently in use.
  void GetPoolUsedSuperPages(pool_handle handle,
                             std::bitset<kMaxSuperPages>& used);

  // Return the base address of a pool.
  uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif

  // Reserves address space from GigaCage.
  char* Reserve(pool_handle handle, void* requested_address, size_t length);

  // Frees address space back to GigaCage and decommits underlying system pages.
  void UnreserveAndDecommit(pool_handle handle, void* ptr, size_t length);
  void ResetForTesting();

#if !defined(PA_HAS_64_BITS_POINTERS)
  void MarkUsed(pool_handle handle, const void* address, size_t size);
  void MarkUnused(pool_handle handle, const void* address, size_t size);

  static bool IsManagedByNonBRPPool(const void* address) {
    return AddressPoolManagerBitmap::IsManagedByNonBRPPool(address);
  }

  static bool IsManagedByBRPPool(const void* address) {
    return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
  }
#endif  // !defined(PA_HAS_64_BITS_POINTERS)

 private:
  friend class AddressPoolManagerForTesting;

  AddressPoolManager();
  ~AddressPoolManager();

#if defined(PA_HAS_64_BITS_POINTERS)
  class Pool {
   public:
    Pool();
    ~Pool();

    void Initialize(uintptr_t ptr, size_t length);
    bool IsInitialized();
    void Reset();

    uintptr_t FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

    bool TryReserveChunk(uintptr_t address, size_t size);

    void GetUsedSuperPages(std::bitset<kMaxSuperPages>& used);
    uintptr_t GetBaseAddress();

   private:
    base::Lock lock_;

    // The bitset stores the allocation state of the address pool. 1 bit per
    // super-page: 1 = allocated, 0 = free.
    std::bitset<kMaxSuperPages> alloc_bitset_ GUARDED_BY(lock_);

    // An index of a bit in the bitset, before which we know for sure that all
    // bits are 1. This is a best-effort hint in the sense that there still may
    // be lots of 1s after this index, but at least we know there is no point
    // in starting the search before it.
    size_t bit_hint_ GUARDED_BY(lock_);

    size_t total_bits_ = 0;
    uintptr_t address_begin_ = 0;
#if DCHECK_IS_ON()
    uintptr_t address_end_ = 0;
#endif
  };

  ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
    PA_DCHECK(0 < handle && handle <= kNumPools);
    return &pools_[handle - 1];
  }

  Pool pools_[kNumPools];

#endif  // defined(PA_HAS_64_BITS_POINTERS)

  friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
  DISALLOW_COPY_AND_ASSIGN(AddressPoolManager);
};

ALWAYS_INLINE pool_handle GetNonBRPPool() {
  return kNonBRPPoolHandle;
}

ALWAYS_INLINE pool_handle GetBRPPool() {
  return kBRPPoolHandle;
}

}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
@ -0,0 +1,43 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/lazy_instance.h"

#if !defined(PA_HAS_64_BITS_POINTERS)

namespace base {
namespace internal {

namespace {

LazyInstance<Lock>::Leaky g_lock = LAZY_INSTANCE_INITIALIZER;

}  // namespace

Lock& AddressPoolManagerBitmap::GetLock() {
  return g_lock.Get();
}

std::bitset<AddressPoolManagerBitmap::kNonBRPPoolBits>
    AddressPoolManagerBitmap::non_brp_pool_bits_;  // GUARDED_BY(GetLock())
std::bitset<AddressPoolManagerBitmap::kBRPPoolBits>
    AddressPoolManagerBitmap::brp_pool_bits_;  // GUARDED_BY(GetLock())
#if BUILDFLAG(USE_BACKUP_REF_PTR)
#if BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
std::array<std::atomic_bool,
           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
    AddressPoolManagerBitmap::brp_forbidden_super_page_map_;
#endif
std::array<std::atomic_uint32_t,
           AddressPoolManagerBitmap::kAddressSpaceSize / kSuperPageSize>
    AddressPoolManagerBitmap::super_page_refcount_map_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
}  // namespace internal
}  // namespace base

#endif  // !defined(PA_HAS_64_BITS_POINTERS)
@ -0,0 +1,205 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

#include <array>
#include <atomic>
#include <bitset>
#include <limits>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

#if !defined(PA_HAS_64_BITS_POINTERS)

namespace base {

namespace internal {

// AddressPoolManagerBitmap is a set of bitmaps that track whether a given
// address is in a pool that supports BackupRefPtr, or in a pool that doesn't
// support it. All PartitionAlloc allocations must be in either of the pools.
//
// This code is specific to 32-bit systems.
class BASE_EXPORT AddressPoolManagerBitmap {
 public:
  static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
  static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB;

  // For the BRP pool, we use partition page granularity to eliminate the guard
  // pages from the bitmap at the ends:
  // - Eliminating the guard page at the beginning is needed so that pointers
  //   to the end of an allocation that immediately precedes a super page in
  //   BRP pool don't accidentally fall into that pool.
  // - Eliminating the guard page at the end is to ensure that the last page
  //   of the address space isn't in the BRP pool. This allows using sentinels
  //   like reinterpret_cast<void*>(-1) without a risk of triggering BRP logic
  //   on an invalid address. (Note, 64-bit systems don't have this problem as
  //   the upper half of the address space always belongs to the OS.)
  //
  // Note, direct map allocations also belong to this pool. The same logic as
  // above applies. It is important to note, however, that the granularity used
  // here has to be a minimum of partition page size and direct map allocation
  // granularity. Since DirectMapAllocationGranularity() is no smaller than
  // PageAllocationGranularity(), we don't need to decrease the bitmap
  // granularity any further.
  static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift();
  static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize();
  static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap,
                "");
  static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1;
  static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2;
  static constexpr size_t kBRPPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap;

  // The non-BRP pool may include both normal bucket and direct map
  // allocations, so the bitmap granularity has to be at least as small as
  // DirectMapAllocationGranularity(). No need to eliminate guard pages at the
  // ends, as this is a BackupRefPtr-specific concern, hence no need to lower
  // the granularity to partition page size.
  static constexpr size_t kBitShiftOfNonBRPPoolBitmap =
      DirectMapAllocationGranularityShift();
  static constexpr size_t kBytesPer1BitOfNonBRPPoolBitmap =
      DirectMapAllocationGranularity();
  static_assert(kBytesPer1BitOfNonBRPPoolBitmap ==
                    1 << kBitShiftOfNonBRPPoolBitmap,
                "");
  static constexpr size_t kNonBRPPoolBits =
      kAddressSpaceSize / kBytesPer1BitOfNonBRPPoolBitmap;

  // Returns false for nullptr.
  static bool IsManagedByNonBRPPool(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);
    static_assert(
        std::numeric_limits<uintptr_t>::max() >> kBitShiftOfNonBRPPoolBitmap <
            non_brp_pool_bits_.size(),
        "The bitmap is too small, will result in unchecked out of bounds "
        "accesses.");
    // It is safe to read |non_brp_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return TS_UNCHECKED_READ(
        non_brp_pool_bits_)[address_as_uintptr >> kBitShiftOfNonBRPPoolBitmap];
  }

  // Returns false for nullptr.
  static bool IsManagedByBRPPool(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);
    static_assert(std::numeric_limits<uintptr_t>::max() >>
                      kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(),
                  "The bitmap is too small, will result in unchecked out of "
                  "bounds accesses.");
    // It is safe to read |brp_pool_bits_| without a lock since the caller
    // is responsible for guaranteeing that the address is inside a valid
    // allocation and the deallocation call won't race with this call.
    return TS_UNCHECKED_READ(
        brp_pool_bits_)[address_as_uintptr >> kBitShiftOfBRPPoolBitmap];
  }

#if BUILDFLAG(USE_BACKUP_REF_PTR)
  static void IncrementOutsideOfBRPPoolPtrRefCount(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);

#if BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
    brp_forbidden_super_page_map_[address_as_uintptr >> kSuperPageShift].store(
        true, std::memory_order_relaxed);
#else
    super_page_refcount_map_[address_as_uintptr >> kSuperPageShift].fetch_add(
        1, std::memory_order_relaxed);
#endif  // BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
  }

  static void DecrementOutsideOfBRPPoolPtrRefCount(const void* address) {
#if BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
    // No-op. In this mode, we only use one bit per super-page and, therefore,
    // can't tell if there's more than one associated raw_ptr<T> at a given
    // time. There's a small risk that we may exhaust the entire address
    // space. On the other hand, a single relaxed store (in the above function)
    // is much less expensive than two CAS operations.
#else
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);

    super_page_refcount_map_[address_as_uintptr >> kSuperPageShift].fetch_sub(
        1, std::memory_order_relaxed);
#endif  // BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
  }

  static bool IsAllowedSuperPageForBRPPool(const void* address) {
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);

    // The only potentially dangerous scenario in which this check is used is
    // when the assignment of the first raw_ptr<T> object for a non-GigaCage
    // address is racing with the allocation of a new GigaCage super-page at
    // the same address. We assume that if raw_ptr<T> is being initialized with
    // a raw pointer, the associated allocation is "alive"; otherwise, the
    // issue should be fixed by rewriting the raw pointer variable as
    // raw_ptr<T>. In the worst case, when such a fix is impossible, we should
    // just undo the raw pointer -> raw_ptr<T> rewrite of the problematic
    // field. If the above assumption holds, the existing allocation will
    // prevent us from reserving the super-page region and, thus, having the
    // race condition. Since we rely on that external synchronization, the
    // relaxed memory ordering should be sufficient.
#if BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
    return !brp_forbidden_super_page_map_[address_as_uintptr >> kSuperPageShift]
                .load(std::memory_order_relaxed);
#else
    return super_page_refcount_map_[address_as_uintptr >> kSuperPageShift].load(
               std::memory_order_relaxed) == 0;
#endif  // BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
  }
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

 private:
  friend class AddressPoolManager;

  static Lock& GetLock();

  static std::bitset<kNonBRPPoolBits> non_brp_pool_bits_ GUARDED_BY(GetLock());
  static std::bitset<kBRPPoolBits> brp_pool_bits_ GUARDED_BY(GetLock());
#if BUILDFLAG(USE_BACKUP_REF_PTR)
#if BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
  static std::array<std::atomic_bool, kAddressSpaceSize / kSuperPageSize>
      brp_forbidden_super_page_map_;
#endif  // BUILDFLAG(NEVER_REMOVE_FROM_BRP_POOL_BLOCKLIST)
  static std::array<std::atomic_uint32_t, kAddressSpaceSize / kSuperPageSize>
      super_page_refcount_map_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
};

}  // namespace internal

// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAlloc(const void* address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
  PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address));
#endif
  return internal::AddressPoolManagerBitmap::IsManagedByNonBRPPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
         || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)
#endif
      ;
}

// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocNonBRPPool(const void* address) {
  return internal::AddressPoolManagerBitmap::IsManagedByNonBRPPool(address);
}

// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(const void* address) {
  return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}

}  // namespace base

#endif  // !defined(PA_HAS_64_BITS_POINTERS)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_
@ -0,0 +1,17 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

namespace base {

namespace internal {

using pool_handle = unsigned;

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_
@ -0,0 +1,68 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_space_randomization.h"

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/check_op.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include <windows.h>  // Must be in front of other Windows header files.

#include <VersionHelpers.h>
#endif

namespace base {

void* GetRandomPageBase() {
  uintptr_t random = static_cast<uintptr_t>(RandomValue());

#if defined(ARCH_CPU_64_BITS)
  random <<= 32ULL;
  random |= static_cast<uintptr_t>(RandomValue());

  // The ASLRMask() and ASLROffset() constants will be suitable for the
  // OS and build configuration.
#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  // Windows >= 8.1 has the full 47 bits. Use them where available.
  static bool windows_81 = false;
  static bool windows_81_initialized = false;
  if (!windows_81_initialized) {
    windows_81 = IsWindows8Point1OrGreater();
    windows_81_initialized = true;
  }
  if (!windows_81) {
    random &= internal::ASLRMaskBefore8_10();
  } else {
    random &= internal::ASLRMask();
  }
  random += internal::ASLROffset();
#else
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else   // defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
  // On win32 host systems the randomization plus huge alignment causes
  // excessive fragmentation. Plus most of these systems lack ASLR, so the
  // randomization isn't buying anything. In that case we just skip it.
  // TODO(palmer): Just dump the randomization when HE-ASLR is present.
  static BOOL is_wow64 = -1;
  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
    is_wow64 = FALSE;
  if (!is_wow64)
    return nullptr;
#endif  // defined(OS_WIN)
  random &= internal::ASLRMask();
  random += internal::ASLROffset();
#endif  // defined(ARCH_CPU_32_BITS)

  PA_DCHECK(!(random & PageAllocationGranularityOffsetMask()));
  return reinterpret_cast<void*>(random);
}

}  // namespace base
@ -0,0 +1,266 @@
|
||||
// Copyright 2014 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
|
||||
|
||||
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||
#include "base/base_export.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace base {
|
||||
|
||||
// Calculates a random preferred mapping address. In calculating an address, we
|
||||
// balance good ASLR against not fragmenting the address space too badly.
|
||||
BASE_EXPORT void* GetRandomPageBase();
|
||||
|
||||
namespace internal {
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
AslrAddress(uintptr_t mask) {
|
||||
return mask & PageAllocationGranularityBaseMask();
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
AslrMask(uintptr_t bits) {
|
||||
return AslrAddress((1ULL << bits) - 1ULL);
|
||||
}
|
||||
|
||||
// Turn off formatting, because the thicket of nested ifdefs below is
|
||||
// incomprehensible without indentation. It is also incomprehensible with
|
||||
// indentation, but the only other option is a combinatorial explosion of
|
||||
// *_{win,linux,mac,foo}_{32,64}.h files.
|
||||
//
|
||||
// clang-format off
|
||||
|
||||
#if defined(ARCH_CPU_64_BITS)
|
||||
|
||||
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
|
||||
|
||||
// We shouldn't allocate system pages at all for sanitizer builds. However,
|
||||
// we do, and if random hint addresses interfere with address ranges
|
||||
// hard-coded in those tools, bad things happen. This address range is
|
||||
// copied from TSAN source but works with all tools. See
|
||||
// https://crbug.com/539863.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
ASLRMask() {
|
||||
return AslrAddress(0x007fffffffffULL);
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
ASLROffset() {
|
||||
return AslrAddress(0x7e8000000000ULL);
|
||||
}
|
||||
|
||||
#elif defined(OS_WIN)
|
||||
|
||||
// Windows 8.10 and newer support the full 48 bit address range. Older
|
||||
// versions of Windows only support 44 bits. Since ASLROffset() is non-zero
|
||||
// and may cause a carry, use 47 and 43 bit masks. See
|
||||
// http://www.alex-ionescu.com/?p=246
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||
return AslrMask(47);
|
||||
}
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
|
||||
return AslrMask(43);
|
||||
}
|
||||
// Try not to map pages into the range where Windows loads DLLs by default.
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||
return 0x80000000ULL;
|
||||
}
|
||||
|
||||
#elif defined(OS_APPLE)
|
||||
|
||||
// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
|
||||
// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
|
||||
// is destroyed. Using a virtual address space that is too large causes a
|
||||
// leak of about 1 wired [can never be paged out] page per call to mmap. The
|
||||
// page is only reclaimed when the process is killed. Confine the hint to a
|
||||
// 39-bit section of the virtual address space.
|
||||
//
|
||||
// This implementation adapted from
|
||||
// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
|
||||
// is that here we clamp to 39 bits, not 32.
|
||||
//
|
||||
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
|
||||
// changes.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
ASLRMask() {
|
||||
return AslrMask(38);
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
|
||||
ASLROffset() {
|
||||
return AslrAddress(0x1000000000ULL);
|
||||
}
|
||||
|
||||
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
|
||||
|
||||
#if defined(ARCH_CPU_X86_64)
|
||||
|
||||
// Linux (and macOS) support the full 47-bit user space of x64 processors.
|
||||
// Use only 46 to allow the kernel a chance to fulfill the request.
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||
return AslrMask(46);
|
||||
}
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||
return AslrAddress(0);
|
||||
}
|
||||
|
||||
#elif defined(ARCH_CPU_ARM64)
|
||||
|
||||
#if defined(OS_ANDROID)
|
||||
|
||||
// Restrict the address range on Android to avoid a large performance
|
||||
// regression in single-process WebViews. See https://crbug.com/837640.
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||
return AslrMask(30);
|
||||
}
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||
return AslrAddress(0x20000000ULL);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
|
||||
// could cause a carry.
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||
return AslrMask(38);
|
||||
}
|
||||
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||
return AslrAddress(0x1000000000ULL);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#elif defined(ARCH_CPU_PPC64)
|
||||
|
||||
#if defined(OS_AIX)
|
||||
|
||||
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x400000000000ULL);
}

#elif defined(ARCH_CPU_BIG_ENDIAN)

// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(42);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}

#else  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(46);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}

#endif  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

#elif defined(ARCH_CPU_S390X)

// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow the kernel a
// chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(40);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}

#elif defined(ARCH_CPU_S390)

// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(29);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0);
}

#else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
       // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

// For all other POSIX variants, use 30 bits.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}

#if defined(OS_SOLARIS)

// For our Solaris/illumos mmap hint, we pick a random address in the
// bottom half of the top half of the address space (that is, the third
// quarter). Because we do not MAP_FIXED, this will be treated only as a
// hint -- the system will not fail to mmap because something else
// happens to already be mapped at our random address. We deliberately
// set the hint high enough to get well above the system's break (that
// is, the heap); Solaris and illumos will try the hint and if that
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x80000000ULL);
}

#elif defined(OS_AIX)

// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x90000000ULL);
}

#else  // !defined(OS_SOLARIS) && !defined(OS_AIX)

// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x20000000ULL);
}

#endif  // !defined(OS_SOLARIS) && !defined(OS_AIX)

#endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
        // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

#endif  // defined(OS_POSIX)

#elif defined(ARCH_CPU_32_BITS)

// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
  return AslrMask(30);
}
constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
  return AslrAddress(0x20000000ULL);
}

#else

#error Please tell us about your exotic hardware! Sounds interesting.

#endif  // defined(ARCH_CPU_32_BITS)

// clang-format on

}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
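// Illustrative sketch, not part of the import: how the ASLRMask()/
// ASLROffset() pair above is typically consumed. A random value is clipped
// to the platform's randomizable bit range, then relocated into the region
// the platform sets aside for mappings. |random_value| is a hypothetical
// entropy source; the real consumer, GetRandomPageBase() in
// address_space_randomization.cc, follows the same mask-then-offset shape.
uintptr_t RandomMmapHintSketch(uint64_t (*random_value)()) {
  uintptr_t random = static_cast<uintptr_t>(random_value());
  random &= base::internal::ASLRMask();    // keep only the randomizable bits
  random += base::internal::ASLROffset();  // move into the target region
  return random;  // callers align this to page granularity before use
}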
@ -0,0 +1,47 @@

# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file contains a test function for checking Arm's branch target
# identification (BTI) feature, which helps mitigate jump-oriented
# programming. To get it working, BTI instructions must be executed
# on a compatible core, and the executable pages must be mapped with
# PROT_BTI. To validate that pages mapped with PROT_BTI are working
# correctly:
# 1) Allocate a read-write page.
# 2) Copy between the start and end symbols into that page.
# 3) Set the page to read-execute with PROT_BTI.
# 4) Call the first offset of the page, verify the result.
# 5) Call the second offset of the page (skipping the landing pad).
#    Verify that it crashes as expected.
# This test works irrespective of whether BTI is enabled for C/C++
# objects via -mbranch-protection=standard.

.text
.global arm_bti_test_function
.global arm_bti_test_function_invalid_offset
.global arm_bti_test_function_end
arm_bti_test_function:
  # Mark the start of this function as a valid call target.
  bti jc
  add x0, x0, #1
arm_bti_test_function_invalid_offset:
  # This label simulates calling an incomplete function.
  # Jumping here should crash systems which support BTI.
  add x0, x0, #2
  ret
arm_bti_test_function_end:
  nop

.pushsection .note.gnu.property, "a";
  .balign 8;
  .long 4;
  .long 0x10;
  .long 0x5;
  .asciz "GNU";
  .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
  .long 4;
  .long 1; /* GNU_PROPERTY_AARCH64_BTI */;
  .long 0;
.popsection
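// Illustrative sketch, not part of the import: the five validation steps
// described in the comment at the top of the assembly file, written in C++
// against the symbols it exports. Assumes Linux/arm64, where <asm/mman.h>
// defines PROT_BTI (0x10); error handling is reduced to early returns.
#include <sys/mman.h>
#include <cstdint>
#include <cstring>

#ifndef PROT_BTI
#define PROT_BTI 0x10  // Linux/arm64 value, assumed here
#endif

extern "C" {
int64_t arm_bti_test_function(int64_t);
void arm_bti_test_function_end(void);
}

bool BtiPageReturnsExpectedValue(size_t page_size) {
  // 1) Allocate a read-write page.
  void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED)
    return false;
  // 2) Copy the code between the start and end symbols into it.
  size_t length = reinterpret_cast<uintptr_t>(&arm_bti_test_function_end) -
                  reinterpret_cast<uintptr_t>(&arm_bti_test_function);
  memcpy(page, reinterpret_cast<void*>(&arm_bti_test_function), length);
  __builtin___clear_cache(static_cast<char*>(page),
                          static_cast<char*>(page) + length);
  // 3) Flip the page to read-execute with PROT_BTI.
  if (mprotect(page, page_size, PROT_READ | PROT_EXEC | PROT_BTI) != 0)
    return false;
  // 4) Offset 0 lands on "bti jc", then runs both adds: 0 + 1 + 2 == 3.
  auto entry = reinterpret_cast<int64_t (*)(int64_t)>(page);
  bool ok = entry(0) == 3;
  // 5) Calling arm_bti_test_function_invalid_offset the same way should
  //    fault on BTI hardware; that half needs a death-test harness.
  munmap(page, page_size);
  return ok;
}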
@ -0,0 +1,17 @@

// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_

#include <stdint.h>

#include "build/build_config.h"
#if defined(ARCH_CPU_ARM64)
extern "C" {
int64_t arm_bti_test_function(int64_t);
int64_t arm_bti_test_function_invalid_offset(int64_t);
void arm_bti_test_function_end(void);
}
#endif  // defined(ARCH_CPU_ARM64)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_
77
src/base/allocator/partition_allocator/extended_api.cc
Normal file
@ -0,0 +1,77 @@

// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/extended_api.h"

#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/thread_cache.h"

namespace base {

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    defined(PA_THREAD_CACHE_SUPPORTED)

namespace {

void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
  // Some platforms don't have a thread cache, or it could already have been
  // disabled.
  if (!root || !root->with_thread_cache)
    return;

  internal::ThreadCacheRegistry::Instance().PurgeAll();
  root->with_thread_cache = false;
  // Doesn't destroy the thread cache object(s). For background threads, they
  // will be collected (and free cached memory) at thread destruction
  // time. For the main thread, we leak it.
}

void EnablePartitionAllocThreadCacheForRootIfDisabled(
    ThreadSafePartitionRoot* root) {
  if (!root)
    return;
  root->with_thread_cache = true;
}

void DisablePartitionAllocThreadCacheForProcess() {
  auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
  auto* aligned_allocator = internal::PartitionAllocMalloc::AlignedAllocator();
  DisableThreadCacheForRootIfEnabled(regular_allocator);
  if (aligned_allocator != regular_allocator)
    DisableThreadCacheForRootIfEnabled(aligned_allocator);
  DisableThreadCacheForRootIfEnabled(
      internal::PartitionAllocMalloc::OriginalAllocator());
}

}  // namespace

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // defined(PA_THREAD_CACHE_SUPPORTED)

void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    defined(PA_THREAD_CACHE_SUPPORTED)
  DisablePartitionAllocThreadCacheForProcess();
  internal::ThreadCache::SwapForTesting(root);
  EnablePartitionAllocThreadCacheForRootIfDisabled(root);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // defined(PA_THREAD_CACHE_SUPPORTED)
}

void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
    defined(PA_THREAD_CACHE_SUPPORTED)
  // First, disable the test thread cache we have.
  DisableThreadCacheForRootIfEnabled(root);

  auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
  EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);

  internal::ThreadCache::SwapForTesting(regular_allocator);
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // defined(PA_THREAD_CACHE_SUPPORTED)
}

}  // namespace base
27
src/base/allocator/partition_allocator/extended_api.h
Normal file
@ -0,0 +1,27 @@

// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"

namespace base {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
BASE_EXPORT void SwapOutProcessThreadCacheForTesting(
    ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
BASE_EXPORT void SwapInProcessThreadCacheForTesting(
    ThreadSafePartitionRoot* root);

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
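// Illustrative sketch, not part of the import: wrapping a test body so its
// allocations go through a caller-owned root's thread cache. |test_root| is
// assumed to be a fully initialized ThreadSafePartitionRoot, and both names
// below are hypothetical test helpers.
#include "base/allocator/partition_allocator/extended_api.h"

void RunWithTestThreadCache(base::ThreadSafePartitionRoot* test_root,
                            void (*test_body)()) {
  // Purges and disables the process-wide thread caches, then installs one
  // backed by |test_root| on the current thread.
  base::SwapOutProcessThreadCacheForTesting(test_root);
  test_body();
  // Tears the test cache down and restores the process default.
  base::SwapInProcessThreadCacheForTesting(test_root);
}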
163
src/base/allocator/partition_allocator/memory_reclaimer.cc
Normal file
@ -0,0 +1,163 @@

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/memory_reclaimer.h"

#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/metrics/histogram_functions.h"
#include "base/trace_event/base_tracing.h"

namespace base {

namespace {

template <bool thread_safe>
void Insert(std::set<PartitionRoot<thread_safe>*>* partitions,
            PartitionRoot<thread_safe>* partition) {
  PA_DCHECK(partition);
  auto it_and_whether_inserted = partitions->insert(partition);
  PA_DCHECK(it_and_whether_inserted.second);
}

template <bool thread_safe>
void Remove(std::set<PartitionRoot<thread_safe>*>* partitions,
            PartitionRoot<thread_safe>* partition) {
  PA_DCHECK(partition);
  size_t erased_count = partitions->erase(partition);
  PA_DCHECK(erased_count == 1u);
}

}  // namespace

// static
PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
  static NoDestructor<PartitionAllocMemoryReclaimer> instance;
  return instance.get();
}

void PartitionAllocMemoryReclaimer::RegisterPartition(
    PartitionRoot<internal::ThreadSafe>* partition) {
  AutoLock lock(lock_);
  Insert(&thread_safe_partitions_, partition);
}

void PartitionAllocMemoryReclaimer::RegisterPartition(
    PartitionRoot<internal::NotThreadSafe>* partition) {
  AutoLock lock(lock_);
  Insert(&thread_unsafe_partitions_, partition);
}

void PartitionAllocMemoryReclaimer::UnregisterPartition(
    PartitionRoot<internal::ThreadSafe>* partition) {
  AutoLock lock(lock_);
  Remove(&thread_safe_partitions_, partition);
}

void PartitionAllocMemoryReclaimer::UnregisterPartition(
    PartitionRoot<internal::NotThreadSafe>* partition) {
  AutoLock lock(lock_);
  Remove(&thread_unsafe_partitions_, partition);
}

void PartitionAllocMemoryReclaimer::Start(
    scoped_refptr<SequencedTaskRunner> task_runner) {
  PA_DCHECK(!timer_);
  PA_DCHECK(task_runner);

  {
    AutoLock lock(lock_);
    PA_DCHECK(!thread_safe_partitions_.empty());
  }

  // This does not need to run on the main thread, however there are a few
  // reasons to do it there:
  // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
  //   is more likely in cache when executing on the main thread.
  // - Memory reclaim takes the partition lock for each partition. As a
  //   consequence, while reclaim is running, the main thread is unlikely to be
  //   able to make progress, as it would be waiting on the lock.
  // - Finally, this runs in idle time only, so there should be no visible
  //   impact.
  //
  // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
  // seconds is useful. Since this is meant to run during idle time only, it is
  // a reasonable starting point balancing effectiveness vs cost. See
  // crbug.com/942512 for details and experimental results.
  constexpr TimeDelta kInterval = TimeDelta::FromSeconds(4);

  timer_ = std::make_unique<RepeatingTimer>();
  timer_->SetTaskRunner(task_runner);
  // Here and below, |Unretained(this)| is fine as |this| lives forever, as a
  // singleton.
  timer_->Start(
      FROM_HERE, kInterval,
      BindRepeating(&PartitionAllocMemoryReclaimer::ReclaimPeriodically,
                    Unretained(this)));
}

PartitionAllocMemoryReclaimer::PartitionAllocMemoryReclaimer() = default;
PartitionAllocMemoryReclaimer::~PartitionAllocMemoryReclaimer() = default;

void PartitionAllocMemoryReclaimer::ReclaimAll() {
  constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
                         PartitionPurgeDiscardUnusedSystemPages |
                         PartitionPurgeAggressiveReclaim;
  Reclaim(kFlags);
}

void PartitionAllocMemoryReclaimer::ReclaimPeriodically() {
  constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
                         PartitionPurgeDiscardUnusedSystemPages;
  Reclaim(kFlags);
}

void PartitionAllocMemoryReclaimer::Reclaim(int flags) {
  AutoLock lock(lock_);  // Has to protect from concurrent (Un)Register calls.
  TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");

  // PCScan quarantines freed slots. Trigger the scan first to let it call
  // FreeNoHooksImmediate on slots that pass the quarantine.
  //
  // In turn, FreeNoHooksImmediate may add slots to thread cache. Purge it next
  // so that the slots are actually freed. (This is done synchronously only for
  // the current thread.)
  //
  // Lastly, decommit empty slot spans and try to discard unused pages at the
  // end of the remaining active slots.
  {
    using PCScan = internal::PCScan;
    const auto invocation_mode = flags & PartitionPurgeAggressiveReclaim
                                     ? PCScan::InvocationMode::kForcedBlocking
                                     : PCScan::InvocationMode::kBlocking;
    PCScan::PerformScanIfNeeded(invocation_mode);
  }

#if defined(PA_THREAD_CACHE_SUPPORTED)
  // Don't completely empty the thread cache outside of low memory situations,
  // as there is periodic purge which makes sure that it doesn't take too much
  // space.
  if (flags & PartitionPurgeAggressiveReclaim)
    internal::ThreadCacheRegistry::Instance().PurgeAll();
#endif

  for (auto* partition : thread_safe_partitions_)
    partition->PurgeMemory(flags);
  for (auto* partition : thread_unsafe_partitions_)
    partition->PurgeMemory(flags);
}

void PartitionAllocMemoryReclaimer::ResetForTesting() {
  AutoLock lock(lock_);

  timer_ = nullptr;
  thread_safe_partitions_.clear();
  thread_unsafe_partitions_.clear();
}

}  // namespace base
72
src/base/allocator/partition_allocator/memory_reclaimer.h
Normal file
@ -0,0 +1,72 @@

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

#include <memory>
#include <set>

#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/no_destructor.h"
#include "base/sequenced_task_runner.h"
#include "base/thread_annotations.h"
#include "base/timer/timer.h"

namespace base {

// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// called from any thread, concurrently with reclaim. Reclaim itself runs in the
// context of the provided |SequencedTaskRunner|, meaning that the caller must
// take care of this runner being compatible with the various partitions.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.
class BASE_EXPORT PartitionAllocMemoryReclaimer {
 public:
  static PartitionAllocMemoryReclaimer* Instance();

  // Internal. Do not use.
  // Registers a partition to be tracked by the reclaimer.
  void RegisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
  void RegisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
  // Internal. Do not use.
  // Unregisters a partition to be tracked by the reclaimer.
  void UnregisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
  void UnregisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
  // Starts the periodic reclaim. Should be called once.
  void Start(scoped_refptr<SequencedTaskRunner> task_runner);
  // Triggers an explicit reclaim now, reclaiming all free memory.
  void ReclaimAll();
  // Triggers an explicit reclaim now, with the same flags as the periodic
  // task (less aggressive than |ReclaimAll()|).
  void ReclaimPeriodically();

 private:
  PartitionAllocMemoryReclaimer();
  ~PartitionAllocMemoryReclaimer();
  // |flags| is an OR of base::PartitionPurgeFlags
  void Reclaim(int flags);
  void ReclaimAndReschedule();
  void ResetForTesting();

  // Schedules periodic |Reclaim()|.
  std::unique_ptr<RepeatingTimer> timer_;

  Lock lock_;
  std::set<PartitionRoot<internal::ThreadSafe>*> thread_safe_partitions_
      GUARDED_BY(lock_);
  std::set<PartitionRoot<internal::NotThreadSafe>*> thread_unsafe_partitions_
      GUARDED_BY(lock_);

  friend class NoDestructor<PartitionAllocMemoryReclaimer>;
  friend class PartitionAllocMemoryReclaimerTest;
  DISALLOW_COPY_AND_ASSIGN(PartitionAllocMemoryReclaimer);
};

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
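// Illustrative sketch, not part of the import: the intended call pattern.
// |root| stands in for a caller-owned partition; in Chromium, registration
// normally happens inside the allocator itself rather than by hand.
void StartReclaimSketch(
    scoped_refptr<base::SequencedTaskRunner> runner,
    base::PartitionRoot<base::internal::ThreadSafe>* root) {
  auto* reclaimer = base::PartitionAllocMemoryReclaimer::Instance();
  reclaimer->RegisterPartition(root);
  reclaimer->Start(std::move(runner));  // 4-second periodic purge from here on
}
// Under memory pressure, an embedder can force the aggressive path, which
// additionally drains thread caches and runs PCScan in forced-blocking mode:
//   base::PartitionAllocMemoryReclaimer::Instance()->ReclaimAll();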
19
src/base/allocator/partition_allocator/oom.cc
Normal file
@ -0,0 +1,19 @@

// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/oom.h"

#include "base/allocator/partition_allocator/oom_callback.h"
#include "base/compiler_specific.h"
#include "base/immediate_crash.h"
#include "base/process/memory.h"

// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] NOINLINE void NOT_TAIL_CALLED OnNoMemory(size_t size) {
  base::internal::RunPartitionAllocOomCallback();
  base::TerminateBecauseOutOfMemory(size);
  IMMEDIATE_CRASH();
}
27
src/base/allocator/partition_allocator/oom.h
Normal file
@ -0,0 +1,27 @@

// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_

#include <stddef.h>

#include "base/base_export.h"
#include "base/compiler_specific.h"

// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] BASE_EXPORT void NOT_TAIL_CALLED OnNoMemory(size_t size);

// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
// OOM_CRASH(size) is called by users of PageAllocator (including
// PartitionAlloc) to signify an allocation failure from the platform.
#define OOM_CRASH(size) \
  do {                  \
    OnNoMemory(size);   \
  } while (0)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
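// Illustrative sketch, not part of the import: the call pattern OOM_CRASH()
// exists for. AllocOrDie() is hypothetical; real call sites live inside
// PageAllocator and PartitionAlloc.
void* AllocOrDie(size_t length) {
  void* ptr = base::AllocPages(nullptr, length,
                               base::PageAllocationGranularity(),
                               base::PageReadWrite, base::PageTag::kChromium);
  if (!ptr)
    OOM_CRASH(length);  // runs the OOM callback, then crashes with |length|
  return ptr;
}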
27
src/base/allocator/partition_allocator/oom_callback.cc
Normal file
@ -0,0 +1,27 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/oom_callback.h"

#include "base/allocator/partition_allocator/partition_alloc_check.h"

namespace base {

namespace {
PartitionAllocOomCallback g_oom_callback;
}  // namespace

void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
  PA_DCHECK(!g_oom_callback);
  g_oom_callback = callback;
}

namespace internal {
void RunPartitionAllocOomCallback() {
  if (g_oom_callback)
    g_oom_callback();
}
}  // namespace internal

}  // namespace base
24
src/base/allocator/partition_allocator/oom_callback.h
Normal file
@ -0,0 +1,24 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_

#include "base/base_export.h"

namespace base {
typedef void (*PartitionAllocOomCallback)();
// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
// invoked by users of PageAllocator (including PartitionAlloc) to signify an
// allocation failure from the platform.
BASE_EXPORT void SetPartitionAllocOomCallback(
    PartitionAllocOomCallback callback);

namespace internal {
BASE_EXPORT void RunPartitionAllocOomCallback();
}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
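// Illustrative sketch, not part of the import: an embedder registering a
// one-shot diagnostic hook at startup. LogOomState() is hypothetical; any
// void() function works, and it should not allocate.
void LogOomState() {
  // Record allocator counters for the crash report, without allocating.
}

void InitAllocatorDiagnostics() {
  base::SetPartitionAllocOomCallback(&LogOomState);
}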
349
src/base/allocator/partition_allocator/page_allocator.cc
Normal file
@ -0,0 +1,349 @@

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/page_allocator.h"

#include <limits.h>

#include <atomic>

#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bits.h"
#include "base/lazy_instance.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include <windows.h>
#endif

#if defined(OS_WIN)
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
#elif defined(OS_POSIX)
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif defined(OS_FUCHSIA)
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif

namespace base {

namespace {

LazyInstance<Lock>::Leaky g_reserve_lock = LAZY_INSTANCE_INITIALIZER;

// We may reserve/release address space on different threads.
Lock& GetReserveLock() {
  return g_reserve_lock.Get();
}

std::atomic<size_t> g_total_mapped_address_space;

// We only support a single block of reserved address space.
void* s_reservation_address GUARDED_BY(GetReserveLock()) = nullptr;
size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;

void* AllocPagesIncludingReserved(void* address,
                                  size_t length,
                                  PageAccessibilityConfiguration accessibility,
                                  PageTag page_tag) {
  void* ret = SystemAllocPages(address, length, accessibility, page_tag);
  if (ret == nullptr) {
    const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
    if (cant_alloc_length) {
      // The system cannot allocate |length| bytes. Release any reserved address
      // space and try once more.
      ReleaseReservation();
      ret = SystemAllocPages(address, length, accessibility, page_tag);
    }
  }
  return ret;
}

// Trims |base| to the given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns nullptr and frees |base|.
void* TrimMapping(void* base,
                  size_t base_length,
                  size_t trim_length,
                  uintptr_t alignment,
                  uintptr_t alignment_offset,
                  PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(base_length >= trim_length);
  PA_DCHECK(bits::IsPowerOfTwo(alignment));
  PA_DCHECK(alignment_offset < alignment);
  uintptr_t base_as_uintptr = reinterpret_cast<uintptr_t>(base);
  uintptr_t new_base =
      NextAlignedWithOffset(base_as_uintptr, alignment, alignment_offset);
  PA_DCHECK(new_base >= base_as_uintptr);
  size_t pre_slack = new_base - base_as_uintptr;
  size_t post_slack = base_length - pre_slack - trim_length;
  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
  PA_DCHECK(pre_slack < base_length);
  PA_DCHECK(post_slack < base_length);
  return TrimMappingInternal(base, base_length, trim_length, accessibility,
                             pre_slack, post_slack);
}

}  // namespace

// Align |address| up to the closest, non-smaller address, that gives
// |requested_offset| remainder modulo |alignment|.
//
// Examples for alignment=1024 and requested_offset=64:
//   64 -> 64
//   65 -> 1088
//   1024 -> 1088
//   1088 -> 1088
//   1089 -> 2112
//   2048 -> 2112
uintptr_t NextAlignedWithOffset(uintptr_t address,
                                uintptr_t alignment,
                                uintptr_t requested_offset) {
  PA_DCHECK(bits::IsPowerOfTwo(alignment));
  PA_DCHECK(requested_offset < alignment);

  uintptr_t actual_offset = address & (alignment - 1);
  uintptr_t new_address;
  if (actual_offset <= requested_offset)
    new_address = address + requested_offset - actual_offset;
  else
    new_address = address + alignment + requested_offset - actual_offset;
  PA_DCHECK(new_address >= address);
  PA_DCHECK(new_address - address < alignment);
  PA_DCHECK(new_address % alignment == requested_offset);

  return new_address;
}
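// Illustrative check of the examples above, not part of the import; the
// calls are to the real function defined here, so this could sit in a test.
void CheckNextAlignedWithOffsetExamples() {
  PA_DCHECK(NextAlignedWithOffset(64, 1024, 64) == 64);
  PA_DCHECK(NextAlignedWithOffset(65, 1024, 64) == 1088);
  PA_DCHECK(NextAlignedWithOffset(1024, 1024, 64) == 1088);
  PA_DCHECK(NextAlignedWithOffset(1089, 1024, 64) == 2112);
  PA_DCHECK(NextAlignedWithOffset(2048, 1024, 64) == 2112);
}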
|
||||
void* SystemAllocPages(void* hint,
|
||||
size_t length,
|
||||
PageAccessibilityConfiguration accessibility,
|
||||
PageTag page_tag) {
|
||||
PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
|
||||
PageAllocationGranularityOffsetMask()));
|
||||
void* ptr = SystemAllocPagesInternal(hint, length, accessibility, page_tag);
|
||||
if (ptr)
|
||||
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void* AllocPages(void* address,
|
||||
size_t length,
|
||||
size_t align,
|
||||
PageAccessibilityConfiguration accessibility,
|
||||
PageTag page_tag) {
|
||||
return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
|
||||
page_tag);
|
||||
}
|
||||
|
||||
void* AllocPagesWithAlignOffset(void* address,
|
||||
size_t length,
|
||||
size_t align,
|
||||
size_t align_offset,
|
||||
PageAccessibilityConfiguration accessibility,
|
||||
PageTag page_tag) {
|
||||
PA_DCHECK(length >= PageAllocationGranularity());
|
||||
PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
|
||||
PA_DCHECK(align >= PageAllocationGranularity());
|
||||
// Alignment must be power of 2 for masking math to work.
|
||||
PA_DCHECK(base::bits::IsPowerOfTwo(align));
|
||||
PA_DCHECK(align_offset < align);
|
||||
PA_DCHECK(!(align_offset & PageAllocationGranularityOffsetMask()));
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
|
||||
PageAllocationGranularityOffsetMask()));
|
||||
uintptr_t align_offset_mask = align - 1;
|
||||
uintptr_t align_base_mask = ~align_offset_mask;
|
||||
PA_DCHECK(address == nullptr || (reinterpret_cast<uintptr_t>(address) &
|
||||
align_offset_mask) == align_offset);
|
||||
|
||||
// If the client passed null as the address, choose a good one.
|
||||
if (address == nullptr) {
|
||||
address = GetRandomPageBase();
|
||||
address = reinterpret_cast<void*>(
|
||||
(reinterpret_cast<uintptr_t>(address) & align_base_mask) +
|
||||
align_offset);
|
||||
}
|
||||
|
||||
// First try to force an exact-size, aligned allocation from our random base.
|
||||
#if defined(ARCH_CPU_32_BITS)
|
||||
// On 32 bit systems, first try one random aligned address, and then try an
|
||||
// aligned address derived from the value of |ret|.
|
||||
constexpr int kExactSizeTries = 2;
|
||||
#else
|
||||
// On 64 bit systems, try 3 random aligned addresses.
|
||||
constexpr int kExactSizeTries = 3;
|
||||
#endif
|
||||
|
||||
for (int i = 0; i < kExactSizeTries; ++i) {
|
||||
void* ret =
|
||||
AllocPagesIncludingReserved(address, length, accessibility, page_tag);
|
||||
if (ret != nullptr) {
|
||||
// If the alignment is to our liking, we're done.
|
||||
if ((reinterpret_cast<uintptr_t>(ret) & align_offset_mask) ==
|
||||
align_offset)
|
||||
return ret;
|
||||
// Free the memory and try again.
|
||||
FreePages(ret, length);
|
||||
} else {
|
||||
// |ret| is null; if this try was unhinted, we're OOM.
|
||||
if (kHintIsAdvisory || address == nullptr)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
#if defined(ARCH_CPU_32_BITS)
|
||||
// For small address spaces, try the first aligned address >= |ret|. Note
|
||||
// |ret| may be null, in which case |address| becomes null. If
|
||||
// |align_offset| is non-zero, this calculation may get us not the first,
|
||||
// but the next matching address.
|
||||
address = reinterpret_cast<void*>(
|
||||
((reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
|
||||
align_base_mask) +
|
||||
align_offset);
|
||||
#else // defined(ARCH_CPU_64_BITS)
|
||||
// Keep trying random addresses on systems that have a large address space.
|
||||
address = GetRandomPageBase();
|
||||
address = reinterpret_cast<void*>(NextAlignedWithOffset(
|
||||
reinterpret_cast<uintptr_t>(address), align, align_offset));
|
||||
#endif
|
||||
}
|
||||
|
||||
// Make a larger allocation so we can force alignment.
|
||||
size_t try_length = length + (align - PageAllocationGranularity());
|
||||
PA_CHECK(try_length >= length);
|
||||
void* ret;
|
||||
|
||||
do {
|
||||
// Continue randomizing only on POSIX.
|
||||
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
|
||||
ret = AllocPagesIncludingReserved(address, try_length, accessibility,
|
||||
page_tag);
|
||||
// The retries are for Windows, where a race can steal our mapping on
|
||||
// resize.
|
||||
} while (ret != nullptr &&
|
||||
(ret = TrimMapping(ret, try_length, length, align, align_offset,
|
||||
accessibility)) == nullptr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void FreePages(void* address, size_t length) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
|
||||
PageAllocationGranularityOffsetMask()));
|
||||
PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
|
||||
FreePagesInternal(address, length);
|
||||
PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
|
||||
g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
bool TrySetSystemPagesAccess(void* address,
|
||||
size_t length,
|
||||
PageAccessibilityConfiguration accessibility) {
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
return TrySetSystemPagesAccessInternal(address, length, accessibility);
|
||||
}
|
||||
|
||||
void SetSystemPagesAccess(void* address,
|
||||
size_t length,
|
||||
PageAccessibilityConfiguration accessibility) {
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
SetSystemPagesAccessInternal(address, length, accessibility);
|
||||
}
|
||||
|
||||
void DecommitSystemPages(
|
||||
void* address,
|
||||
size_t length,
|
||||
PageAccessibilityDisposition accessibility_disposition) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
DecommitSystemPagesInternal(address, length, accessibility_disposition);
|
||||
}
|
||||
|
||||
void DecommitAndZeroSystemPages(void* address, size_t length) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
DecommitAndZeroSystemPagesInternal(address, length);
|
||||
}
|
||||
|
||||
void RecommitSystemPages(
|
||||
void* address,
|
||||
size_t length,
|
||||
PageAccessibilityConfiguration accessibility,
|
||||
PageAccessibilityDisposition accessibility_disposition) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
PA_DCHECK(accessibility != PageInaccessible);
|
||||
RecommitSystemPagesInternal(address, length, accessibility,
|
||||
accessibility_disposition);
|
||||
}
|
||||
|
||||
bool TryRecommitSystemPages(
|
||||
void* address,
|
||||
size_t length,
|
||||
PageAccessibilityConfiguration accessibility,
|
||||
PageAccessibilityDisposition accessibility_disposition) {
|
||||
// Duplicated because we want errors to be reported at a lower level in the
|
||||
// crashing case.
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
PA_DCHECK(accessibility != PageInaccessible);
|
||||
return TryRecommitSystemPagesInternal(address, length, accessibility,
|
||||
accessibility_disposition);
|
||||
}
|
||||
|
||||
void DiscardSystemPages(void* address, size_t length) {
|
||||
PA_DCHECK(!(length & SystemPageOffsetMask()));
|
||||
DiscardSystemPagesInternal(address, length);
|
||||
}
|
||||
|
||||
bool ReserveAddressSpace(size_t size) {
|
||||
// To avoid deadlock, call only SystemAllocPages.
|
||||
AutoLock guard(GetReserveLock());
|
||||
if (s_reservation_address == nullptr) {
|
||||
void* mem =
|
||||
SystemAllocPages(nullptr, size, PageInaccessible, PageTag::kChromium);
|
||||
if (mem != nullptr) {
|
||||
// We guarantee this alignment when reserving address space.
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
|
||||
PageAllocationGranularityOffsetMask()));
|
||||
s_reservation_address = mem;
|
||||
s_reservation_size = size;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ReleaseReservation() {
|
||||
// To avoid deadlock, call only FreePages.
|
||||
AutoLock guard(GetReserveLock());
|
||||
if (!s_reservation_address)
|
||||
return false;
|
||||
|
||||
FreePages(s_reservation_address, s_reservation_size);
|
||||
s_reservation_address = nullptr;
|
||||
s_reservation_size = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool HasReservationForTesting() {
|
||||
AutoLock guard(GetReserveLock());
|
||||
return s_reservation_address != nullptr;
|
||||
}
|
||||
|
||||
uint32_t GetAllocPageErrorCode() {
|
||||
return s_allocPageErrorCode;
|
||||
}
|
||||
|
||||
size_t GetTotalMappedSize() {
|
||||
return g_total_mapped_address_space;
|
||||
}
|
||||
|
||||
} // namespace base
|
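// Illustrative sketch, not part of the import: the early-startup reservation
// dance this file implements. The 512 MiB figure is arbitrary.
void ReserveSafetyMarginSketch() {
  // Reserve a slab up front; AllocPagesIncludingReserved() releases it and
  // retries when a later allocation cannot otherwise be satisfied.
  if (!base::ReserveAddressSpace(512 * 1024 * 1024)) {
    // Either the mapping failed or a reservation already exists.
  }
}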
287
src/base/allocator/partition_allocator/page_allocator.h
Normal file
@ -0,0 +1,287 @@

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

#include <stdint.h>

#include <cstddef>

#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

namespace base {

enum PageAccessibilityConfiguration {
  PageInaccessible,
  PageRead,
  PageReadWrite,
  // This flag is mapped to PageReadWrite on systems that
  // don't support MTE.
  PageReadWriteTagged,
  // This flag is mapped to PageReadExecute on systems
  // that don't support Arm's BTI.
  PageReadExecuteProtected,
  PageReadExecute,
  // This flag is deprecated and will go away soon.
  // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
  PageReadWriteExecute,
};

// Use for De/RecommitSystemPages API.
enum PageAccessibilityDisposition {
  // Enforces permission update (Decommit will set to PageInaccessible;
  // Recommit will set to whatever was requested, other than PageInaccessible).
  PageUpdatePermissions,
  // Will not update permissions if the platform supports that (POSIX &
  // Fuchsia only).
  PageKeepPermissionsIfPossible,
};

// macOS supports tagged memory regions, to help in debugging. On Android,
// these tags are used to name anonymous mappings.
enum class PageTag {
  kFirst = 240,           // Minimum tag value.
  kBlinkGC = 252,         // Blink GC pages.
  kPartitionAlloc = 253,  // PartitionAlloc, no matter the partition.
  kChromium = 254,        // Chromium page.
  kV8 = 255,              // V8 heap pages.
  kLast = kV8             // Maximum tag value.
};

BASE_EXPORT uintptr_t NextAlignedWithOffset(uintptr_t ptr,
                                            uintptr_t alignment,
                                            uintptr_t requested_offset);

// Allocate one or more pages.
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned to |align_offset| modulo |align|
// bytes.
//
// |length|, |align| and |align_offset| are in bytes, and must be a multiple of
// |PageAllocationGranularity()|. |length| and |align| must be non-zero.
// |align_offset| must be less than |align|. |align| must be a power of two.
//
// If |address| is null, then a suitable and randomized address will be chosen
// automatically.
//
// |accessibility| controls the permission of the allocated pages.
// PageInaccessible means uncommitted.
//
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// This call will return null if the allocation cannot be satisfied.
BASE_EXPORT void* AllocPages(void* address,
                             size_t length,
                             size_t align,
                             PageAccessibilityConfiguration accessibility,
                             PageTag page_tag);
BASE_EXPORT void* AllocPagesWithAlignOffset(
    void* address,
    size_t length,
    size_t align,
    size_t align_offset,
    PageAccessibilityConfiguration page_accessibility,
    PageTag page_tag);

// Free one or more pages starting at |address| and continuing for |length|
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
// |address| must be aligned to |PageAllocationGranularity()| bytes, and
// |length| must be a multiple of |PageAllocationGranularity()|.
BASE_EXPORT void FreePages(void* address, size_t length);

// Mark one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
BASE_EXPORT WARN_UNUSED_RESULT bool TrySetSystemPagesAccess(
    void* address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility);

// Mark one or more system pages, starting at |address| with the given
// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
// bytes.
//
// Performs a CHECK that the operation succeeds.
BASE_EXPORT void SetSystemPagesAccess(
    void* address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility);

// Decommit one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// |accessibility_disposition| allows specifying whether the pages should be
// made inaccessible (PageUpdatePermissions), or left as is
// (PageKeepPermissionsIfPossible, POSIX & Fuchsia only). The latter should only
// be used as an optimization if you really know what you're doing.
// TODO(bartekn): Ideally, all callers should use PageUpdatePermissions,
// for better security, but that may lead to a perf regression. Tracked at
// http://crbug.com/766882.
//
// Decommitted means that physical resources (RAM or swap) backing the allocated
// virtual address range may be released back to the system, but the address
// space is still allocated to the process (possibly using up page table entries
// or other accounting resources). There is no guarantee that the pages are
// zeroed, see |DecommittedMemoryIsAlwaysZeroed()| for such a guarantee. Unless
// PageKeepPermissionsIfPossible disposition is used, any access to a
// decommitted region of memory is an error and will generate a fault.
//
// This operation is not atomic on all platforms.
//
// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
// processes will not fault when touching a committed memory region. There is
// no analogue in the POSIX & Fuchsia memory API where virtual memory pages are
// best-effort allocated resources on the first touch. If PageUpdatePermissions
// disposition is used, this API behaves in a platform-agnostic way by
// simulating the Windows "decommit" state by both discarding the region
// (allowing the OS to avoid swap operations) *and* changing the page
// protections so accesses fault.
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition);

// Decommit one or more system pages starting at |address| and continuing for
// |length| bytes. |address| and |length| must be aligned to a system page
// boundary.
//
// In contrast to |DecommitSystemPages|, this API guarantees that the pages are
// zeroed and will always mark the region as inaccessible (the equivalent of
// setting them to PageInaccessible).
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void DecommitAndZeroSystemPages(void* address, size_t length);

// Whether decommitted memory is guaranteed to be zeroed when it is
// recommitted. Do not assume that this will not change over time.
constexpr BASE_EXPORT bool DecommittedMemoryIsAlwaysZeroed() {
#if defined(OS_APPLE)
  return false;
#else
  return true;
#endif
}

// Recommit one or more system pages, starting at |address| and continuing for
// |length| bytes with the given |page_accessibility| (must not be
// PageInaccessible). |address| and |length| must be aligned to a system page
// boundary.
//
// |accessibility_disposition| allows specifying whether the page permissions
// should be set to |page_accessibility| (PageUpdatePermissions), or left as is
// (PageKeepPermissionsIfPossible, POSIX & Fuchsia only). The latter can only be
// used if the pages were previously accessible and decommitted with
// PageKeepPermissionsIfPossible. It is ok, however, to recommit with
// PageUpdatePermissions even if pages were decommitted with
// PageKeepPermissionsIfPossible (merely losing an optimization).
//
// This operation is not atomic on all platforms.
//
// This API will crash if the operation cannot be performed.
BASE_EXPORT void RecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility,
    PageAccessibilityDisposition accessibility_disposition);

// Like RecommitSystemPages(), but returns false instead of crashing.
BASE_EXPORT bool TryRecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityConfiguration page_accessibility,
    PageAccessibilityDisposition accessibility_disposition) WARN_UNUSED_RESULT;

// Discard one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
//   - Do nothing.
//   - Discard the page immediately, freeing up physical pages.
//   - Discard the page at some time in the future in response to memory
//     pressure.
//
// Only committed pages should be discarded. Discarding a page does not decommit
// it, and it is valid to discard an already-discarded page. A read or write to
// a discarded page will not fault.
//
// Reading from a discarded page may return the original page content, or a page
// full of zeroes.
//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
BASE_EXPORT void DiscardSystemPages(void* address, size_t length);

// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
  return (address + SystemPageOffsetMask()) & SystemPageBaseMask();
}

// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
  return address & SystemPageBaseMask();
}

// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
  return (address + PageAllocationGranularityOffsetMask()) &
         PageAllocationGranularityBaseMask();
}

// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
  return address & PageAllocationGranularityBaseMask();
}

// Reserves (at least) |size| bytes of address space, aligned to
// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already made.
BASE_EXPORT bool ReserveAddressSpace(size_t size);

// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
//
// Returns true when an existing reservation was released.
BASE_EXPORT bool ReleaseReservation();

// Returns true if there is currently an address space reservation.
BASE_EXPORT bool HasReservationForTesting();

// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
BASE_EXPORT uint32_t GetAllocPageErrorCode();

// Returns the total amount of mapped pages from all clients of
// PageAllocator. These pages may or may not be committed. This is mostly useful
// to assess address space pressure.
BASE_EXPORT size_t GetTotalMappedSize();

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
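// Illustrative sketch, not part of the import: reserving a 2 MiB-aligned,
// initially uncommitted region, then committing its first system page.
void* ReserveAlignedRegionSketch() {
  constexpr size_t kSize = 2 * 1024 * 1024;  // multiple of the granularity
  void* region = base::AllocPages(nullptr, kSize, /*align=*/kSize,
                                  base::PageInaccessible,
                                  base::PageTag::kChromium);
  if (!region)
    return nullptr;
  // PageInaccessible meant "uncommitted"; commit one page read-write.
  base::RecommitSystemPages(region, base::SystemPageSize(),
                            base::PageReadWrite,
                            base::PageUpdatePermissions);
  return region;
}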
@ -0,0 +1,117 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

#include <stddef.h>

#include "base/compiler_specific.h"
#include "build/build_config.h"

#if defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)

#include <mach/vm_page_size.h>

// Although page allocator constants are not constexpr, they are run-time
// constant. Because the underlying variables they access, such as vm_page_size,
// are not marked const, the compiler normally has no way to know that they
// don't change and must obtain their values whenever it can't prove that they
// haven't been modified, even if they had already been obtained previously.
// Attaching __attribute__((const)) to these declarations allows these redundant
// accesses to be omitted under optimization such as common subexpression
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))

#else

// When defined, page size constants are fixed at compile time. When not
// defined, they may vary at run time.
#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1

// Use this macro to declare a function as constexpr or not based on whether
// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr

#endif

namespace base {

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if defined(OS_WIN) || defined(ARCH_CPU_PPC64)
  // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
  // sizes. Since 64kB is the de facto standard on the platform and binaries
  // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
  // here.
  return 16;  // 64kB
#elif defined(_MIPS_ARCH_LOONGSON)
  return 14;  // 16kB
#elif defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)
  return vm_page_shift;
#else
  return 12;  // 4kB
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)
  // This is literally equivalent to |1 << PageAllocationGranularityShift()|
  // below, but was separated out for OS_APPLE to avoid << on a non-constexpr.
  return vm_page_size;
#else
  return 1 << PageAllocationGranularityShift();
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
  return PageAllocationGranularity() - 1;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
  return ~PageAllocationGranularityOffsetMask();
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageShift() {
  // On Windows allocation granularity is higher than the page size. This comes
  // into play when reserving address space range (allocation granularity),
  // compared to committing pages into memory (system page granularity).
#if defined(OS_WIN)
  return 12;  // 4096=1<<12
#else
  return PageAllocationGranularityShift();
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageSize() {
#if defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)
  // This is literally equivalent to |1 << SystemPageShift()| below, but was
  // separated out for 64-bit OS_APPLE to avoid << on a non-constexpr.
  return PageAllocationGranularity();
#else
  return 1 << SystemPageShift();
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageOffsetMask() {
  return SystemPageSize() - 1;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageBaseMask() {
  return ~SystemPageOffsetMask();
}

constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
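// Illustrative sketch, not part of the import: how the shift/mask helpers
// compose. On a 4 kB-granularity system (shift 12, mask 0xfff), 5000 bytes
// round up to 8192.
inline size_t RoundUpLengthSketch(size_t length) {
  return (length + base::PageAllocationGranularityOffsetMask()) &
         base::PageAllocationGranularityBaseMask();
}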
@ -0,0 +1,17 @@

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_

namespace base {

void* SystemAllocPages(void* hint,
                       size_t length,
                       PageAccessibilityConfiguration accessibility,
                       PageTag page_tag);

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
@ -0,0 +1,237 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file implements memory allocation primitives for PageAllocator using
// Fuchsia's VMOs (Virtual Memory Objects). VMO API is documented in
// https://fuchsia.dev/fuchsia-src/zircon/objects/vm_object . A VMO is a kernel
// object that corresponds to a set of memory pages. VMO pages may be mapped
// to an address space. The code below creates a VMO for each memory
// allocation and maps it to the default address space of the current process.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_

#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/fuchsia/fuchsia_logging.h"

namespace base {

namespace {

// Returns VMO name for a PageTag.
const char* PageTagToName(PageTag tag) {
  switch (tag) {
    case PageTag::kBlinkGC:
      return "cr_blink_gc";
    case PageTag::kPartitionAlloc:
      return "cr_partition_alloc";
    case PageTag::kChromium:
      return "cr_chromium";
    case PageTag::kV8:
      return "cr_v8";
    default:
      PA_DCHECK(false);
      return "";
  }
}

zx_vm_option_t PageAccessibilityToZxVmOptions(
    PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageRead:
      return ZX_VM_PERM_READ;
    case PageReadWrite:
    case PageReadWriteTagged:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    case PageReadExecuteProtected:
    case PageReadExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
    case PageReadWriteExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
    default:
      PA_NOTREACHED();
      FALLTHROUGH;
    case PageInaccessible:
      return 0;
  }
}

}  // namespace

// zx_vmar_map() will fail if the VMO cannot be mapped at |vmar_offset|, i.e.
// |hint| is not advisory.
constexpr bool kHintIsAdvisory = false;

std::atomic<int32_t> s_allocPageErrorCode{0};

void* SystemAllocPagesInternal(void* hint,
                               size_t length,
                               PageAccessibilityConfiguration accessibility,
                               PageTag page_tag) {
  zx::vmo vmo;
  zx_status_t status = zx::vmo::create(length, 0, &vmo);
  if (status != ZX_OK) {
    ZX_DLOG(INFO, status) << "zx_vmo_create";
    return nullptr;
  }

  const char* vmo_name = PageTagToName(page_tag);
  status = vmo.set_property(ZX_PROP_NAME, vmo_name, strlen(vmo_name));

  // VMO names are used only for debugging, so failure to set a name is not
  // fatal.
  ZX_DCHECK(status == ZX_OK, status);

  if (page_tag == PageTag::kV8) {
    // V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
    // in the new VMO.
    status = vmo.replace_as_executable(zx::resource(), &vmo);
    if (status != ZX_OK) {
      ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
      return nullptr;
    }
  }

  zx_vm_option_t options = PageAccessibilityToZxVmOptions(accessibility);

  uint64_t vmar_offset = 0;
  if (hint) {
    vmar_offset = reinterpret_cast<uint64_t>(hint);
    options |= ZX_VM_SPECIFIC;
  }

  uint64_t address;
  status =
      zx::vmar::root_self()->map(options, vmar_offset, vmo,
                                 /*vmo_offset=*/0, length, &address);
  if (status != ZX_OK) {
    // map() is expected to fail if |hint| is set to an already-in-use location.
    if (!hint) {
      ZX_DLOG(ERROR, status) << "zx_vmar_map";
    }
    return nullptr;
  }

  return reinterpret_cast<void*>(address);
}

void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);

  uint64_t base_address = reinterpret_cast<uint64_t>(base);

  // Unmap head if necessary.
  if (pre_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
    ZX_CHECK(status == ZX_OK, status);
  }

  // Unmap tail if necessary.
  if (post_slack) {
    zx_status_t status = zx::vmar::root_self()->unmap(
        base_address + pre_slack + trim_length, post_slack);
    ZX_CHECK(status == ZX_OK, status);
  }

  return reinterpret_cast<void*>(base_address + pre_slack);
}
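
// Illustrative diagram (not part of the original source): TrimMappingInternal
// unmaps the slack on both sides and returns the aligned middle.
//
//   base                                                 base + base_length
//   |<-- pre_slack -->|<------ trim_length ------>|<-- post_slack -->|
//       (unmapped)              (kept)                 (unmapped)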

bool TrySetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect(
      PageAccessibilityToZxVmOptions(accessibility),
      reinterpret_cast<uint64_t>(address), length);
  return status == ZX_OK;
}

void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  zx_status_t status = zx::vmar::root_self()->protect(
      PageAccessibilityToZxVmOptions(accessibility),
      reinterpret_cast<uint64_t>(address), length);
  ZX_CHECK(status == ZX_OK, status);
}

void FreePagesInternal(void* address, size_t length) {
  uint64_t address_int = reinterpret_cast<uint64_t>(address);
  zx_status_t status = zx::vmar::root_self()->unmap(address_int, length);
  ZX_CHECK(status == ZX_OK, status);
}

void DiscardSystemPagesInternal(void* address, size_t length) {
  // TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
  // forcibly de-committing them immediately, when Fuchsia supports it.
  uint64_t address_int = reinterpret_cast<uint64_t>(address);
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, length, nullptr, 0);
  ZX_CHECK(status == ZX_OK, status);
}

void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, PageInaccessible);
  }

  // TODO(https://crbug.com/1022062): Review whether this implementation is
  // still appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
  // discardable API.
  DiscardSystemPagesInternal(address, length);
}

void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
  SetSystemPagesAccess(address, length, PageInaccessible);

  // TODO(https://crbug.com/1022062): this implementation will likely no longer
  // be appropriate once DiscardSystemPagesInternal() migrates to a "lazy"
  // discardable API.
  DiscardSystemPagesInternal(address, length);
}

void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, accessibility);
  }
}

bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On Fuchsia systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    return TrySetSystemPagesAccess(address, length, accessibility);
  }
  return true;
}

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_FUCHSIA_H_
@ -0,0 +1,62 @@
// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/cpu.h"

#include <sys/mman.h>

// PROT_BTI requests a page that supports BTI landing pads.
#define PROT_BTI 0x10
// PROT_MTE requests a page that's suitable for memory tagging.
#define PROT_MTE 0x20

namespace base {

// Two helper functions to detect whether we can safely use PROT_BTI and
// PROT_MTE (a static CPU instance would trigger a -Wexit-time-destructors
// warning).
static bool HasCPUBranchIdentification() {
#if defined(ARCH_CPU_ARM_FAMILY)
  CPU cpu = CPU::CreateNoAllocation();
  return cpu.has_bti();
#else
  return false;
#endif
}

static bool HasCPUMemoryTaggingExtension() {
#if defined(ARCH_CPU_ARM_FAMILY)
  CPU cpu = CPU::CreateNoAllocation();
  return cpu.has_mte();
#else
  return false;
#endif
}

int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  static const bool has_bti = HasCPUBranchIdentification();
  static const bool has_mte = HasCPUMemoryTaggingExtension();
  switch (accessibility) {
    case PageRead:
      return PROT_READ;
    case PageReadWrite:
      return PROT_READ | PROT_WRITE;
    case PageReadWriteTagged:
      return PROT_READ | PROT_WRITE | (has_mte ? PROT_MTE : 0u);
    case PageReadExecuteProtected:
      return PROT_READ | PROT_EXEC | (has_bti ? PROT_BTI : 0u);
    case PageReadExecute:
      return PROT_READ | PROT_EXEC;
    case PageReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    default:
      PA_NOTREACHED();
      FALLTHROUGH;
    case PageInaccessible:
      return PROT_NONE;
  }
}
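
// Illustrative note (not part of the original source): the function-local
// `static const bool` values in GetAccessFlags() run each CPU probe once, on
// first use, without a global `static CPU` object (which would trigger
// -Wexit-time-destructors). A minimal sketch of the same pattern, with a
// hypothetical probe:
//
//   int CachedProbe() {
//     static const int value = ExpensiveProbe();  // evaluated once
//     return value;                               // later calls reuse it
//   }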

}  // namespace base
@ -0,0 +1,362 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <algorithm>

#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/dcheck_is_on.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"

#if defined(OS_APPLE)
#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_cftyperef.h"

#include <Availability.h>
#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if defined(OS_ANDROID)
#include <sys/prctl.h>
#endif
#if defined(OS_LINUX) || defined(OS_CHROMEOS)
#include <sys/resource.h>
#endif

#include "base/allocator/partition_allocator/page_allocator.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#if defined(OS_APPLE)

// SecTaskGetCodeSignStatus is marked as unavailable on macOS, although it’s
// available on iOS and other Apple operating systems. It is, in fact, present
// on the system since macOS 10.12.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wavailability"
uint32_t SecTaskGetCodeSignStatus(SecTaskRef task)
#if __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12
    // When redeclaring something previously declared as unavailable, the
    // weak_import attribute won’t be applied unless manually set.
    __attribute__((weak_import))
#endif  // DT < 10.12
    API_AVAILABLE(macos(10.12));
#pragma clang diagnostic pop

#endif  // OS_APPLE

namespace base {

namespace {

#if defined(OS_ANDROID)
const char* PageTagToName(PageTag tag) {
  // Important: All the names should be string literals. As per prctl.h in
  // //third_party/android_ndk the kernel keeps a pointer to the name instead
  // of copying it.
  //
  // Having the name in .rodata ensures that the pointer remains valid as
  // long as the mapping is alive.
  switch (tag) {
    case PageTag::kBlinkGC:
      return "blink_gc";
    case PageTag::kPartitionAlloc:
      return "partition_alloc";
    case PageTag::kChromium:
      return "chromium";
    case PageTag::kV8:
      return "v8";
    default:
      PA_DCHECK(false);
      return "";
  }
}
#endif  // defined(OS_ANDROID)

#if defined(OS_APPLE)
// Tests whether the version of macOS supports the MAP_JIT flag and if the
// current process is signed with the hardened runtime and the allow-jit
// entitlement, returning whether MAP_JIT should be used to allocate regions
// that will contain JIT-compiled executable code.
bool UseMapJit() {
  if (!mac::IsAtLeastOS10_14()) {
    // MAP_JIT existed before macOS 10.14, but had somewhat different semantics.
    // Only one MAP_JIT region was permitted per process, but calling code here
    // will very likely require more than one such region. Since MAP_JIT is not
    // strictly necessary to write code to a region and then execute it on these
    // older OSes, don’t use it at all.
    return false;
  }

  // Until determining that the hardened runtime is enabled, early returns will
  // return true, so that MAP_JIT will be used. This is important on arm64,
  // which only allows pages to be simultaneously writable and executable when
  // in a region allocated with MAP_JIT, regardless of code signing options. On
  // arm64, an attempt to set a non-MAP_JIT page as simultaneously writable and
  // executable fails with EPERM. Although this is not enforced on x86_64,
  // MAP_JIT is harmless in that case.

  ScopedCFTypeRef<SecTaskRef> task(SecTaskCreateFromSelf(kCFAllocatorDefault));
  if (!task) {
    return true;
  }

  uint32_t flags = SecTaskGetCodeSignStatus(task);
  if (!(flags & kSecCodeSignatureRuntime)) {
    // The hardened runtime is not enabled. Note that kSecCodeSignatureRuntime
    // == CS_RUNTIME.
    return true;
  }

  // The hardened runtime is enabled. From this point on, early returns must
  // return false, indicating that MAP_JIT is not to be used. It’s an error
  // (EINVAL) to use MAP_JIT with the hardened runtime unless the JIT
  // entitlement is specified.

  ScopedCFTypeRef<CFTypeRef> jit_entitlement(SecTaskCopyValueForEntitlement(
      task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
  if (!jit_entitlement)
    return false;

  return mac::CFCast<CFBooleanRef>(jit_entitlement.get()) == kCFBooleanTrue;
}
#endif  // defined(OS_APPLE)

}  // namespace

// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
std::atomic<int32_t> s_allocPageErrorCode{0};

int GetAccessFlags(PageAccessibilityConfiguration accessibility);

void* SystemAllocPagesInternal(void* hint,
                               size_t length,
                               PageAccessibilityConfiguration accessibility,
                               PageTag page_tag) {
#if defined(OS_APPLE)
  // Use a custom tag to make it easier to distinguish Partition Alloc regions
  // in vmmap(1). Tags between 240-255 are supported.
  PA_DCHECK(PageTag::kFirst <= page_tag);
  PA_DCHECK(PageTag::kLast >= page_tag);
  int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
  int fd = -1;
#endif

  int access_flag = GetAccessFlags(accessibility);
  int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;

#if defined(OS_APPLE)
  // On macOS 10.14 and higher, executables that are code signed with the
  // "runtime" option cannot execute writable memory by default. They can opt
  // into this capability by specifying the "com.apple.security.cs.allow-jit"
  // code signing entitlement and allocating the region with the MAP_JIT flag.
  static const bool kUseMapJit = UseMapJit();
  if (page_tag == PageTag::kV8 && kUseMapJit) {
    map_flags |= MAP_JIT;
  }
#endif

  void* ret = mmap(hint, length, access_flag, map_flags, fd, 0);
  if (ret == MAP_FAILED) {
    s_allocPageErrorCode = errno;
    ret = nullptr;
  }

#if defined(OS_ANDROID)
  // On Android, anonymous mappings can have a name attached to them. This is
  // useful for debugging, and double-checking memory attribution.
  if (ret) {
    // No error checking on purpose, testing only.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
          PageTagToName(page_tag));
  }
#endif

  return ret;
}
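
// Illustrative usage sketch (not part of the original source): reserving an
// inaccessible range, then committing it for use; the constants are
// assumptions for the example.
//
//   void* p = SystemAllocPagesInternal(/*hint=*/nullptr, 2 * 4096,
//                                      PageInaccessible, PageTag::kChromium);
//   if (p)
//     SetSystemPagesAccessInternal(p, 2 * 4096, PageReadWrite);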

bool TrySetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  return 0 ==
         HANDLE_EINTR(mprotect(address, length, GetAccessFlags(accessibility)));
}

void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  int access_flags = GetAccessFlags(accessibility);
  const int ret = HANDLE_EINTR(mprotect(address, length, access_flags));

  // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
  // kernel data structures cannot be allocated, (2) the address range is
  // invalid, or (3) this would split an existing mapping in a way that would
  // exceed the maximum number of allowed mappings.
  //
  // None of these is very likely, but we still get a lot of crashes here. This
  // is because setrlimit(RLIMIT_DATA)'s limit is checked and enforced here, if
  // the access flags match a "data" mapping, which in our case would be
  // MAP_PRIVATE | MAP_ANONYMOUS, and PROT_WRITE. See the call to
  // may_expand_vm() in mm/mprotect.c in the kernel for details.
  //
  // In this case, we are almost certainly bumping into the sandbox limit, so
  // mark the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
  if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
    OOM_CRASH(length);

  PA_PCHECK(0 == ret);
}

void FreePagesInternal(void* address, size_t length) {
  PA_PCHECK(0 == munmap(address, length));
}

void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  void* ret = base;
  // We can resize the allocation run. Release unneeded memory before and after
  // the aligned range.
  if (pre_slack) {
    FreePages(base, pre_slack);
    ret = reinterpret_cast<char*>(base) + pre_slack;
  }
  if (post_slack) {
    FreePages(reinterpret_cast<char*>(ret) + trim_length, post_slack);
  }
  return ret;
}

void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // In POSIX, there is no decommit concept. Discarding is an effective way of
  // implementing the Windows semantics where the OS is allowed to not swap the
  // pages in the region.
  DiscardSystemPages(address, length);

  bool change_permissions = accessibility_disposition == PageUpdatePermissions;
#if DCHECK_IS_ON()
  // This is not guaranteed, show that we're serious.
  //
  // More specifically, several callers have had issues with assuming that
  // memory is zeroed, this would hopefully make these bugs more visible. We
  // don't memset() everything, because ranges can be very large, and doing it
  // over the entire range could make Chrome unusable with DCHECK_IS_ON().
  //
  // Only do it when we are about to change the permissions, since we don't
  // know the previous permissions, and cannot restore them.
  if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
    // Memory may not be writable.
    size_t size = std::min(length, 2 * SystemPageSize());
    PA_CHECK(mprotect(address, size, PROT_WRITE) == 0);
    memset(address, 0xcc, size);
  }
#endif

  // Make pages inaccessible, unless the caller requested to keep permissions.
  //
  // Note, there is a small window between these calls when the pages can be
  // incorrectly touched and brought back to memory. Not ideal, but doing those
  // operations in the opposite order resulted in PMF regression on Mac (see
  // crbug.com/1153021).
  if (change_permissions) {
    SetSystemPagesAccess(address, length, PageInaccessible);
  }
}

void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
  // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
  // a MAP_FIXED request is successful, then any previous mappings [...] for
  // those whole pages containing any part of the address range [pa,pa+len)
  // shall be removed, as if by an appropriate call to munmap(), before the
  // new mapping is established." As a consequence, the memory will be
  // zero-initialized on next access.
  void* ptr = mmap(address, length, PROT_NONE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  PA_CHECK(ptr == address);
}
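
// Illustrative note (not part of the original source): mapping over the range
// in place with MAP_FIXED, rather than munmap() followed by mmap(), leaves no
// window in which another thread's mmap() could be placed by the kernel
// inside [address, address + length).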

void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    SetSystemPagesAccess(address, length, accessibility);
  }

#if defined(OS_APPLE)
  // On macOS, to update accounting, we need to make another syscall. For more
  // details, see https://crbug.com/823915.
  madvise(address, length, MADV_FREE_REUSE);
#endif
}

bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // On POSIX systems, the caller needs to simply read the memory to recommit
  // it. However, if decommit changed the permissions, recommit has to change
  // them back.
  if (accessibility_disposition == PageUpdatePermissions) {
    bool ok = TrySetSystemPagesAccess(address, length, accessibility);
    if (!ok)
      return false;
  }

#if defined(OS_APPLE)
  // On macOS, to update accounting, we need to make another syscall. For more
  // details, see https://crbug.com/823915.
  madvise(address, length, MADV_FREE_REUSE);
#endif

  return true;
}

void DiscardSystemPagesInternal(void* address, size_t length) {
#if defined(OS_APPLE)
  int ret = madvise(address, length, MADV_FREE_REUSABLE);
  if (ret) {
    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
    ret = madvise(address, length, MADV_DONTNEED);
  }
  PA_PCHECK(ret == 0);
#else
  // We have experimented with other flags, but with suboptimal results.
  //
  // MADV_FREE (Linux): Makes our memory measurements less predictable;
  // performance benefits unclear.
  //
  // Therefore, we just do the simple thing: MADV_DONTNEED.
  PA_PCHECK(0 == madvise(address, length, MADV_DONTNEED));
#endif
}

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
@ -0,0 +1,177 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_

#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"

namespace base {

// |VirtualAlloc| will fail if allocation at the hint address is blocked.
constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};

int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
  switch (accessibility) {
    case PageRead:
      return PAGE_READONLY;
    case PageReadWrite:
    case PageReadWriteTagged:
      return PAGE_READWRITE;
    case PageReadExecute:
    case PageReadExecuteProtected:
      return PAGE_EXECUTE_READ;
    case PageReadWriteExecute:
      return PAGE_EXECUTE_READWRITE;
    default:
      PA_NOTREACHED();
      FALLTHROUGH;
    case PageInaccessible:
      return PAGE_NOACCESS;
  }
}

void* SystemAllocPagesInternal(void* hint,
                               size_t length,
                               PageAccessibilityConfiguration accessibility,
                               PageTag page_tag) {
  DWORD access_flag = GetAccessFlags(accessibility);
  const DWORD type_flags = (accessibility != PageInaccessible)
                               ? (MEM_RESERVE | MEM_COMMIT)
                               : MEM_RESERVE;
  void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
  if (ret == nullptr) {
    s_allocPageErrorCode = GetLastError();
  }
  return ret;
}

void* TrimMappingInternal(void* base,
                          size_t base_length,
                          size_t trim_length,
                          PageAccessibilityConfiguration accessibility,
                          size_t pre_slack,
                          size_t post_slack) {
  void* ret = base;
  if (pre_slack || post_slack) {
    // We cannot resize the allocation run. Free it and retry at the aligned
    // address within the freed range.
    ret = reinterpret_cast<char*>(base) + pre_slack;
    FreePages(base, base_length);
    ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium);
  }
  return ret;
}
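
// Illustrative note (not part of the original source): unlike the POSIX
// version, VirtualFree(MEM_RELEASE) can only release an entire reservation,
// so trimming frees the whole run and re-reserves just the aligned middle.
// Presumably another thread can claim the freed range before the retry, in
// which case SystemAllocPages() fails and the returned pointer is null.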

bool TrySetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  if (accessibility == PageInaccessible)
    return VirtualFree(address, length, MEM_DECOMMIT) != 0;
  return nullptr != VirtualAlloc(address, length, MEM_COMMIT,
                                 GetAccessFlags(accessibility));
}

void SetSystemPagesAccessInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility) {
  if (accessibility == PageInaccessible) {
    if (!VirtualFree(address, length, MEM_DECOMMIT)) {
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
    }
  } else {
    if (!VirtualAlloc(address, length, MEM_COMMIT,
                      GetAccessFlags(accessibility))) {
      int32_t error = GetLastError();
      if (error == ERROR_COMMITMENT_LIMIT)
        OOM_CRASH(length);
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
      PA_CHECK(ERROR_SUCCESS == error);
    }
  }
}

void FreePagesInternal(void* address, size_t length) {
  PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
}

void DecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  SetSystemPagesAccess(address, length, PageInaccessible);
}

void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
  // "If a page is decommitted but not released, its state changes to reserved.
  // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
  // release it. Attempts to read from or write to a reserved page results in
  // an access violation exception."
  // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
  // for MEM_COMMIT: "The function also guarantees that when the caller later
  // initially accesses the memory, the contents will be zero."
  PA_CHECK(VirtualFree(address, length, MEM_DECOMMIT));
}

void RecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  SetSystemPagesAccess(address, length, accessibility);
}

bool TryRecommitSystemPagesInternal(
    void* address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Ignore accessibility_disposition, because decommitting is equivalent to
  // making pages inaccessible.
  return TrySetSystemPagesAccess(address, length, accessibility);
}

void DiscardSystemPagesInternal(void* address, size_t length) {
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  using DiscardVirtualMemoryFunction =
      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
  static DiscardVirtualMemoryFunction discard_virtual_memory =
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
  if (discard_virtual_memory ==
      reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
    discard_virtual_memory =
        reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
            GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = 1;
  if (discard_virtual_memory) {
    ret = discard_virtual_memory(address, length);
  }
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    PA_CHECK(ptr);
  }
}
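
// Illustrative note (not part of the original source): the function-pointer
// caching above is a lazy-binding pattern, used because DiscardVirtualMemory()
// is not present on older versions of Windows. The sentinel value -1
// distinguishes "not yet looked up" from "looked up and absent" (nullptr), so
// GetProcAddress runs at most once per process.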

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
@ -0,0 +1,104 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_address_space.h"

#include <array>
#include <ostream>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/bits.h"

namespace base {

namespace internal {

#if defined(PA_HAS_64_BITS_POINTERS)

constexpr std::array<size_t, 2> PartitionAddressSpace::kPoolSizes;

uintptr_t PartitionAddressSpace::reserved_base_address_ = 0;
// Before PartitionAddressSpace::Init(), no allocations are made from the
// reserved address space. Therefore, set *_pool_base_address_ initially to
// k*PoolOffsetMask, so that PartitionAddressSpace::IsIn*Pool() always returns
// false.
uintptr_t PartitionAddressSpace::non_brp_pool_base_address_ =
    kNonBRPPoolOffsetMask;
uintptr_t PartitionAddressSpace::brp_pool_base_address_ = kBRPPoolOffsetMask;

pool_handle PartitionAddressSpace::non_brp_pool_ = 0;
pool_handle PartitionAddressSpace::brp_pool_ = 0;

void PartitionAddressSpace::Init() {
  if (IsInitialized())
    return;

  GigaCageProperties properties = CalculateGigaCageProperties(kPoolSizes);

  reserved_base_address_ =
      reinterpret_cast<uintptr_t>(AllocPagesWithAlignOffset(
          nullptr, properties.size, properties.alignment,
          properties.alignment_offset, base::PageInaccessible,
          PageTag::kPartitionAlloc));
  PA_CHECK(reserved_base_address_);

  uintptr_t current = reserved_base_address_;

  non_brp_pool_base_address_ = current;
  PA_DCHECK(!(non_brp_pool_base_address_ & (kNonBRPPoolSize - 1)));
  non_brp_pool_ = internal::AddressPoolManager::GetInstance()->Add(
      current, kNonBRPPoolSize);
  PA_CHECK(non_brp_pool_ == kNonBRPPoolHandle);
  PA_DCHECK(!IsInNonBRPPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(IsInNonBRPPool(reinterpret_cast<void*>(current)));
  current += kNonBRPPoolSize;
  PA_DCHECK(IsInNonBRPPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(!IsInNonBRPPool(reinterpret_cast<void*>(current)));

  brp_pool_base_address_ = current;
  PA_DCHECK(!(brp_pool_base_address_ & (kBRPPoolSize - 1)));
  brp_pool_ =
      internal::AddressPoolManager::GetInstance()->Add(current, kBRPPoolSize);
  PA_CHECK(brp_pool_ == kBRPPoolHandle);
  PA_DCHECK(!IsInBRPPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(IsInBRPPool(reinterpret_cast<void*>(current)));
  current += kBRPPoolSize;
  PA_DCHECK(IsInBRPPool(reinterpret_cast<void*>(current - 1)));
  PA_DCHECK(!IsInBRPPool(reinterpret_cast<void*>(current)));

#if PA_STARSCAN_USE_CARD_TABLE
  // Reserve memory for the PCScan quarantine card table.
  void* requested_address = reinterpret_cast<void*>(non_brp_pool_base_address_);
  char* actual_address = internal::AddressPoolManager::GetInstance()->Reserve(
      non_brp_pool_, requested_address, kSuperPageSize);
  PA_CHECK(requested_address == actual_address)
      << "QuarantineCardTable is required to be allocated at the beginning of "
         "the non-BRP pool";
#endif  // PA_STARSCAN_USE_CARD_TABLE

  PA_DCHECK(reserved_base_address_ + properties.size == current);
}

void PartitionAddressSpace::UninitForTesting() {
  GigaCageProperties properties = CalculateGigaCageProperties(kPoolSizes);

  FreePages(reinterpret_cast<void*>(reserved_base_address_), properties.size);
  reserved_base_address_ = 0;
  non_brp_pool_base_address_ = kNonBRPPoolOffsetMask;
  brp_pool_base_address_ = kBRPPoolOffsetMask;
  non_brp_pool_ = 0;
  brp_pool_ = 0;
  internal::AddressPoolManager::GetInstance()->ResetForTesting();
}

#endif  // defined(PA_HAS_64_BITS_POINTERS)

}  // namespace internal

}  // namespace base
251
src/base/allocator/partition_allocator/partition_address_space.h
Normal file
251
src/base/allocator/partition_allocator/partition_address_space.h
Normal file
@ -0,0 +1,251 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

#include <algorithm>
#include <array>
#include <limits>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#include "build/buildflag.h"

namespace base {

namespace internal {

// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)

struct GigaCageProperties {
  size_t size;
  size_t alignment;
  size_t alignment_offset;
};

template <size_t N>
GigaCageProperties CalculateGigaCageProperties(
    const std::array<size_t, N>& pool_sizes) {
  size_t size_sum = 0;
  size_t alignment = 0;
  size_t alignment_offset;
  // The goal is to find properties such that each pool's start address is
  // aligned to its own size. To achieve that, the largest pool will serve
  // as an anchor (the first one, if there are more) and it'll be used to
  // determine the core alignment. The sizes of pools before the anchor will
  // determine the offset within the core alignment at which the GigaCage will
  // start.
  // If this algorithm doesn't find the proper alignment, it means such an
  // alignment doesn't exist.
  for (size_t pool_size : pool_sizes) {
    PA_CHECK(bits::IsPowerOfTwo(pool_size));
    if (pool_size > alignment) {
      alignment = pool_size;
      // This may underflow, leading to a very high value, so use modulo
      // |alignment| to bring it down.
      alignment_offset = (alignment - size_sum) & (alignment - 1);
    }
    size_sum += pool_size;
  }
  // Use PA_CHECK because we can't correctly proceed if any pool's start
  // address isn't aligned to its own size. The exact initial value of
  // |sample_address| doesn't matter as long as
  // |address % alignment == alignment_offset|.
  uintptr_t sample_address = alignment_offset + 7 * alignment;
  for (size_t pool_size : pool_sizes) {
    PA_CHECK(!(sample_address & (pool_size - 1)));
    sample_address += pool_size;
  }
  return GigaCageProperties{size_sum, alignment, alignment_offset};
}
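
// Worked example (illustrative, not part of the original source), for
// pool_sizes = {4GiB, 8GiB}:
//   1st pool: 4GiB > 0    -> alignment = 4GiB,
//             alignment_offset = (4GiB - 0GiB) & (4GiB - 1) = 0; size_sum = 4GiB
//   2nd pool: 8GiB > 4GiB -> alignment = 8GiB,
//             alignment_offset = (8GiB - 4GiB) & (8GiB - 1) = 4GiB; size_sum = 12GiB
// Result: {size = 12GiB, alignment = 8GiB, alignment_offset = 4GiB}. A cage
// starting at 8GiB*k + 4GiB puts the 4GiB pool at a 4GiB boundary and the
// 8GiB pool (4GiB later) at an 8GiB boundary, as required.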

// Reserves address space for PartitionAllocator.
class BASE_EXPORT PartitionAddressSpace {
 public:
  // BRP stands for BackupRefPtr. GigaCage is split into pools, one which
  // supports BackupRefPtr and one that doesn't.
  static ALWAYS_INLINE internal::pool_handle GetNonBRPPool() {
    return non_brp_pool_;
  }

  static ALWAYS_INLINE constexpr uintptr_t NonBRPPoolBaseMask() {
    return kNonBRPPoolBaseMask;
  }

  static ALWAYS_INLINE internal::pool_handle GetBRPPool() { return brp_pool_; }

  static ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
      const void* address) {
    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
    PA_DCHECK(!IsInBRPPool(address));
#endif
    pool_handle pool = 0;
    uintptr_t base = 0;
    if (IsInNonBRPPool(address)) {
      pool = GetNonBRPPool();
      base = non_brp_pool_base_address_;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    } else if (IsInBRPPool(address)) {
      pool = GetBRPPool();
      base = brp_pool_base_address_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
    } else {
      PA_NOTREACHED();
    }
    uintptr_t address_as_uintptr = reinterpret_cast<uintptr_t>(address);
    return std::make_pair(pool, address_as_uintptr - base);
  }

  static void Init();
  static void UninitForTesting();

  static ALWAYS_INLINE bool IsInitialized() {
    if (reserved_base_address_) {
      PA_DCHECK(non_brp_pool_ != 0);
      PA_DCHECK(brp_pool_ != 0);
      return true;
    }

    PA_DCHECK(non_brp_pool_ == 0);
    PA_DCHECK(brp_pool_ == 0);
    return false;
  }

  // Returns false for nullptr.
  static ALWAYS_INLINE bool IsInNonBRPPool(const void* address) {
    return (reinterpret_cast<uintptr_t>(address) & kNonBRPPoolBaseMask) ==
           non_brp_pool_base_address_;
  }
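
  // Illustrative note (not part of the original source): because each pool is
  // a power-of-two size and aligned to that size, a single AND recovers the
  // pool base. E.g. with an 8GiB pool based at 0x2'0000'0000:
  //   0x2'4321'0000 & ~(8GiB - 1) == 0x2'0000'0000  -> in pool
  //   0x4'1234'0000 & ~(8GiB - 1) == 0x4'0000'0000  -> not in pool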

  static ALWAYS_INLINE uintptr_t NonBRPPoolBase() {
    return non_brp_pool_base_address_;
  }

  // Returns false for nullptr.
  static ALWAYS_INLINE bool IsInBRPPool(const void* address) {
    return (reinterpret_cast<uintptr_t>(address) & kBRPPoolBaseMask) ==
           brp_pool_base_address_;
  }

  static ALWAYS_INLINE uintptr_t OffsetInBRPPool(const void* address) {
    PA_DCHECK(IsInBRPPool(address));
    return reinterpret_cast<uintptr_t>(address) - brp_pool_base_address_;
  }

  // PartitionAddressSpace is a static-only class.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;

 private:
  // On 64-bit systems, GigaCage is split into two pools, one with allocations
  // that have a BRP ref-count, and one with allocations that don't.
  // +----------------+ reserved_base_address_ (8GiB aligned)
  // |    non-BRP     |     == non_brp_pool_base_address_
  // |      pool      |
  // +----------------+ reserved_base_address_ + 8GiB
  // |      BRP       |     == brp_pool_base_address_
  // |      pool      |
  // +----------------+ reserved_base_address_ + 16GiB
  //
  // Pool sizes have to be a power of two. Each pool will be aligned at its
  // own size boundary.
  //
  // NOTE! The BRP pool must be preceded by a reserved region, where
  // allocations are forbidden. This is to prevent a pointer immediately past
  // a non-GigaCage allocation from falling into the BRP pool, thus triggering
  // the BRP mechanism and likely crashing. One way to implement this is to
  // place another PartitionAlloc pool right before, because trailing guard
  // pages there will fulfill this guarantee. Alternatively, it could be any
  // region that guarantees to not have allocations extending to its very end.
  // But it's just easier to have the non-BRP pool there.
  //
  // If more than 2 consecutive pools are ever needed, care will have to be
  // taken when choosing sizes. For example, for sizes [8GiB,4GiB,8GiB], it'd
  // be impossible to align each pool at its own size boundary while keeping
  // them next to each other. CalculateGigaCageProperties() has non-debug,
  // run-time checks to assert that.
  static constexpr size_t kNonBRPPoolSize = kPoolMaxSize;
  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
  static constexpr size_t kTotalSize = kNonBRPPoolSize + kBRPPoolSize;
  static constexpr std::array<size_t, 2> kPoolSizes = {kNonBRPPoolSize,
                                                       kBRPPoolSize};
  static_assert(bits::IsPowerOfTwo(kNonBRPPoolSize) &&
                    bits::IsPowerOfTwo(kBRPPoolSize),
                "Each pool size should be a power of two.");

  // Masks used to easily determine whether an address belongs to a pool.
  static constexpr uintptr_t kNonBRPPoolOffsetMask =
      static_cast<uintptr_t>(kNonBRPPoolSize) - 1;
  static constexpr uintptr_t kNonBRPPoolBaseMask = ~kNonBRPPoolOffsetMask;
  static constexpr uintptr_t kBRPPoolOffsetMask =
      static_cast<uintptr_t>(kBRPPoolSize) - 1;
  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;

  // See the comment describing the address layout above.
  static uintptr_t reserved_base_address_;
  static uintptr_t non_brp_pool_base_address_;
  static uintptr_t brp_pool_base_address_;

  static pool_handle non_brp_pool_;
  static pool_handle brp_pool_;
};

ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
    const void* address) {
  return PartitionAddressSpace::GetPoolAndOffset(address);
}

ALWAYS_INLINE pool_handle GetPool(const void* address) {
  return std::get<0>(GetPoolAndOffset(address));
}

ALWAYS_INLINE uintptr_t OffsetInBRPPool(const void* address) {
  return PartitionAddressSpace::OffsetInBRPPool(address);
}

#endif  // defined(PA_HAS_64_BITS_POINTERS)

}  // namespace internal

#if defined(PA_HAS_64_BITS_POINTERS)
// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAlloc(const void* address) {
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
  PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
  return internal::PartitionAddressSpace::IsInNonBRPPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
         || internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
      ;
}

// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocNonBRPPool(const void* address) {
  return internal::PartitionAddressSpace::IsInNonBRPPool(address);
}

// Returns false for nullptr.
ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(const void* address) {
  return internal::PartitionAddressSpace::IsInBRPPool(address);
}
#endif  // defined(PA_HAS_64_BITS_POINTERS)

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
57
src/base/allocator/partition_allocator/partition_alloc-inl.h
Normal file
57
src/base/allocator/partition_allocator/partition_alloc-inl.h
Normal file
@ -0,0 +1,57 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

#include <cstring>

#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"

// Prefetch *x into memory.
#if defined(__clang__) || defined(COMPILER_GCC)
#define PA_PREFETCH(x) __builtin_prefetch(x)
#else
#define PA_PREFETCH(x)
#endif

namespace base {

namespace internal {
// This is a `memset` that resists being optimized away. Adapted from
// boringssl/src/crypto/mem.c. (Copying and pasting is bad, but //base can't
// depend on //third_party, and this is small enough.)
ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
  memset(ptr, value, size);

  // As best as we can tell, this is sufficient to break any optimisations that
  // might try to eliminate "superfluous" memsets. If there's an easy way to
  // detect memset_s, it would be better to use that.
  __asm__ __volatile__("" : : "r"(ptr) : "memory");
}

// Returns true if we've hit the end of a random-length period. We don't want
// to invoke `RandomValue` too often, because we call this function in a hot
// spot (`Free`), and `RandomValue` incurs the cost of atomics.
#if !DCHECK_IS_ON()
ALWAYS_INLINE bool RandomPeriod() {
  static thread_local uint8_t counter = 0;
  if (UNLIKELY(counter == 0)) {
    // It's OK to truncate this value.
    counter = static_cast<uint8_t>(base::RandomValue());
  }
  // If `counter` is 0, this will wrap. That is intentional and OK.
  counter--;
  return counter == 0;
}
#endif
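
// Illustrative usage sketch (not part of the original source): combining the
// two helpers above to scribble over a freed slot on a random subset of frees
// (`slot` and `slot_size` are hypothetical):
//
//   if (RandomPeriod())
//     SecureMemset(slot, 0, slot_size);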

}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
121
src/base/allocator/partition_allocator/partition_alloc.cc
Normal file
121
src/base/allocator/partition_allocator/partition_alloc.cc
Normal file
@ -0,0 +1,121 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc.h"

#include <string.h>

#include <memory>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/dcheck_is_on.h"

namespace base {

void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
  // This is from page_allocator_constants.h and doesn't really fit here, but
  // there isn't a centralized initialization function in page_allocator.cc, so
  // there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
  STATIC_ASSERT_OR_PA_CHECK((SystemPageSize() & SystemPageOffsetMask()) == 0,
                            "SystemPageSize() must be power of 2");

  // Two partition pages are used as guard / metadata page so make sure the
  // super page size is bigger.
  STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() * 4 <= kSuperPageSize,
                            "ok super page size");
  STATIC_ASSERT_OR_PA_CHECK((kSuperPageSize & SystemPageOffsetMask()) == 0,
                            "ok super page multiple");
  // Four system pages give us room to hack out a still-guard-paged piece
  // of metadata in the middle of a guard partition page.
  STATIC_ASSERT_OR_PA_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
                            "ok partition page size");
  STATIC_ASSERT_OR_PA_CHECK((PartitionPageSize() & SystemPageOffsetMask()) == 0,
                            "ok partition page multiple");
  static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
                    kPageMetadataSize,
                "PartitionPage should not be too big");
  STATIC_ASSERT_OR_PA_CHECK(
      kPageMetadataSize * NumPartitionPagesPerSuperPage() <= SystemPageSize(),
      "page metadata fits in hole");

  // Limit to prevent callers accidentally overflowing an int size.
  STATIC_ASSERT_OR_PA_CHECK(
      MaxDirectMapped() <= (1UL << 31) + DirectMapAllocationGranularity(),
      "maximum direct mapped allocation");

  // Check that some of our zanier calculations worked out as expected.
  static_assert(kSmallestBucket == kAlignment, "generic smallest bucket");
  static_assert(kMaxBucketed == 917504, "generic max bucketed");
  STATIC_ASSERT_OR_PA_CHECK(
      MaxSystemPagesPerRegularSlotSpan() <= 16,
      "System pages per slot span must be no greater than 16.");

  PA_DCHECK(on_out_of_memory);
  internal::g_oom_handling_function = on_out_of_memory;
}

void PartitionAllocGlobalUninitForTesting() {
  internal::PCScan::UninitForTesting();  // IN-TEST
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if defined(PA_HAS_64_BITS_POINTERS)
  internal::PartitionAddressSpace::UninitForTesting();
#else
  internal::AddressPoolManager::GetInstance()->ResetForTesting();
#endif  // defined(PA_HAS_64_BITS_POINTERS)
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  internal::g_oom_handling_function = nullptr;
}

namespace internal {

template <bool thread_safe>
PartitionAllocator<thread_safe>::~PartitionAllocator() {
  PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
      &partition_root_);
}

template <bool thread_safe>
void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
      << "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
  partition_root_.Init(opts);
  partition_root_.ConfigureLazyCommit();
  PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
      &partition_root_);
}

template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);
template PartitionAllocator<internal::NotThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::NotThreadSafe>::init(
    PartitionOptions);
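
// Illustrative note (not part of the original source): the explicit
// instantiations above allow ~PartitionAllocator() and init() to be defined
// in this .cc file even though partition_alloc.h only declares the template;
// since |thread_safe| is a bool, the two instantiations cover every use.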

#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
void CheckThatSlotOffsetIsZero(void* ptr) {
  // Add kPartitionPastAllocationAdjustment, because
  // PartitionAllocGetSlotStartInBRPPool will subtract it.
  PA_CHECK(PartitionAllocGetSlotStartInBRPPool(
               reinterpret_cast<char*>(ptr) +
               kPartitionPastAllocationAdjustment) == ptr);
}
#endif

}  // namespace internal

}  // namespace base
44
src/base/allocator/partition_allocator/partition_alloc.h
Normal file
44
src/base/allocator/partition_allocator/partition_alloc.h
Normal file
@ -0,0 +1,44 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_

#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"

namespace base {

BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();

namespace internal {
template <bool thread_safe>
struct BASE_EXPORT PartitionAllocator {
  PartitionAllocator() = default;
  ~PartitionAllocator();

  void init(PartitionOptions);

  ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
  ALWAYS_INLINE const PartitionRoot<thread_safe>* root() const {
    return &partition_root_;
  }

 private:
  PartitionRoot<thread_safe> partition_root_;
};

}  // namespace internal

using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
using ThreadUnsafePartitionAllocator =
    internal::PartitionAllocator<internal::NotThreadSafe>;
|
||||
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
|
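Editor's note: the header above is the whole public surface for owning a partition. A hedged usage sketch follows; the PartitionOptions value is assumed to be constructed elsewhere (see partition_root.h), and Alloc's (size, type_name) signature is assumed from PartitionRoot's API:

#include "base/allocator/partition_allocator/partition_alloc.h"

// One long-lived allocator per purpose; init() registers it with the memory
// reclaimer and the destructor unregisters it, as shown in the .cc above.
base::PartitionAllocator g_buffer_allocator;

void InitBufferPartition(base::PartitionOptions opts) {
  g_buffer_allocator.init(opts);
}

void* AllocBuffer(size_t size) {
  return g_buffer_allocator.root()->Alloc(size, "Buffer");
}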
@ -0,0 +1,83 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/check.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h"

#define PA_STRINGIFY_IMPL(s) #s
#define PA_STRINGIFY(s) PA_STRINGIFY_IMPL(s)

// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. When an assertion
// is triggered, they format strings, leading to reentrancy in the code, which
// none of PartitionAlloc is designed to support (and especially not on error
// paths).
//
// As a consequence:
// - When PartitionAlloc is not malloc(), use the regular macros.
// - Otherwise, crash immediately. This provides worse error messages though.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// For official builds, discard log strings to reduce binary bloat.
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
// See base/check.h for implementation details.
#define PA_CHECK(condition) \
  UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
#else
// PartitionAlloc uses the async-signal-safe RawCheck() for error reporting.
// Async-signal-safe functions are guaranteed not to allocate, as otherwise
// they could operate with inconsistent allocator state.
#define PA_CHECK(condition)                                                  \
  UNLIKELY(!(condition))                                                     \
      ? logging::RawCheck(                                                   \
            __FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
      : EAT_CHECK_STREAM_PARAMS()
#endif  // defined(OFFICIAL_BUILD) && defined(NDEBUG)

#if DCHECK_IS_ON()
#define PA_DCHECK(condition) PA_CHECK(condition)
#else
#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
#endif  // DCHECK_IS_ON()

#define PA_PCHECK(condition)    \
  if (!(condition)) {           \
    int error = errno;          \
    base::debug::Alias(&error); \
    IMMEDIATE_CRASH();          \
  }

#else
#define PA_CHECK(condition) CHECK(condition)
#define PA_DCHECK(condition) DCHECK(condition)
#define PA_PCHECK(condition) PCHECK(condition)
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)

// Use this macro to assert on things that are conditionally constexpr, as
// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time,
// this is a static_assert. Where determined at run time, this is a PA_CHECK.
// Therefore, this macro must only be used where both a static_assert and a
// PA_CHECK would be viable, that is, within a function, and ideally a
// function that executes only once, early in the program, such as during
// initialization.
#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
  static_assert(condition, message)

#else

#define STATIC_ASSERT_OR_PA_CHECK(condition, message) \
  do {                                                \
    PA_CHECK(condition) << (message);                 \
  } while (false)

#endif

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
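Editor's note: STATIC_ASSERT_OR_PA_CHECK() is used exactly like a static_assert, but from within a function, so that it can degrade to a runtime PA_CHECK on platforms where page-allocator constants are only known at run time. A sketch mirroring the real call sites in PartitionAllocGlobalInit() above, assuming the base:: page-size helpers are in scope:

void CheckPartitionConstantsOnce() {
  // Compile-time where PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR, else runtime.
  STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() >= SystemPageSize(),
                            "partition page holds >= 1 system page");
}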
126
src/base/allocator/partition_allocator/partition_alloc_config.h
Normal file
126
src/base/allocator/partition_allocator/partition_alloc_config.h
Normal file
@ -0,0 +1,126 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_

#include "base/allocator/buildflags.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h"

// ARCH_CPU_64_BITS implies a 64-bit instruction set, but not necessarily a
// 64-bit address space. The only known case where the address space is 32-bit
// is NaCl, so eliminate it explicitly. The static_asserts below ensure that
// other such cases won't slip through.
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
#define PA_HAS_64_BITS_POINTERS
static_assert(sizeof(void*) == 8, "");
#else
static_assert(sizeof(void*) != 8, "");
#endif

// BackupRefPtr and PCScan are incompatible, and PCScan, due to its
// conservative nature, is 64-bit only.
// Disable PCScan even for USE_BACKUP_REF_PTR_FAKE, so that a "fake" BRP
// experiment is unaffected by PCScan, just as a non-fake one would be.
#if defined(PA_HAS_64_BITS_POINTERS) && !BUILDFLAG(USE_BACKUP_REF_PTR) && \
    !BUILDFLAG(USE_BACKUP_REF_PTR_FAKE)
#define PA_ALLOW_PCSCAN
#endif

#if defined(PA_HAS_64_BITS_POINTERS) && \
    (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_STARSCAN_NEON_SUPPORTED
#endif

#if defined(PA_HAS_64_BITS_POINTERS) && \
    (defined(OS_LINUX) || defined(OS_ANDROID))
#include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
#define PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED
#endif

#if defined(PA_HAS_64_BITS_POINTERS)
// The card table is currently disabled to measure the memory improvement.
#define PA_STARSCAN_USE_CARD_TABLE 0
#else
// The card table is permanently disabled for 32-bit.
#define PA_STARSCAN_USE_CARD_TABLE 0
#endif
#endif

#if PA_STARSCAN_USE_CARD_TABLE && !defined(PA_ALLOW_PCSCAN)
#error "Card table can only be used when *Scan is allowed"
#endif

// POSIX is not enough here, since it also covers macOS and other non-Linux
// OSes, while we use Linux-specific features such as futex(2).
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)
#define PA_HAS_LINUX_KERNEL
#endif

// On some platforms, we implement locking by spinning in userspace, then
// going into the kernel only if there is contention. This requires platform
// support, namely:
// - On Linux, futex(2)
// - On Windows, a fast userspace "try" operation which is available with
//   SRWLock
// - On macOS 10.14+, pthread.
//
// On macOS, pthread_mutex_trylock() is fast by default starting with macOS
// 10.14. Chromium targets an earlier version, so this cannot be known at
// compile time. However, ARM64 macOS devices shipped *after* this release, so
// they necessarily have a fast implementation.
//
// Otherwise, a userspace spinlock implementation is used.
#if defined(PA_HAS_LINUX_KERNEL) || defined(OS_WIN) || \
    (defined(OS_MAC) && defined(ARCH_CPU_ARM64)) || defined(OS_FUCHSIA)
#define PA_HAS_FAST_MUTEX
#endif

// If set to 1, enables zeroing memory on Free() with roughly 1% probability.
// This applies only to normal buckets, as direct-map allocations are always
// decommitted.
// TODO(bartekn): Re-enable once PartitionAlloc-Everywhere evaluation is done.
#if 0
#define PA_ZERO_RANDOMLY_ON_FREE
#endif

// Need TLS support.
#if defined(OS_POSIX) || defined(OS_WIN) || defined(OS_FUCHSIA)
#define PA_THREAD_CACHE_SUPPORTED
#endif

// Too expensive for official builds, as it adds cache misses to all
// allocations. On the other hand, we want wide metrics coverage to get
// realistic profiles.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !defined(OFFICIAL_BUILD)
#define PA_THREAD_CACHE_ALLOC_STATS
#endif

// Optional statistics collection. Lightweight, contrary to the ones above,
// hence enabled by default.
#define PA_THREAD_CACHE_ENABLE_STATISTICS

// Enable free list hardening as much as possible.
//
// Disabled when putting the refcount in the previous slot, which is what
// PUT_REF_COUNT_IN_PREVIOUS_SLOT does. In this case the refcount overlaps
// with the next-pointer shadow for the smallest bucket.
//
// Only for little-endian CPUs, as the freelist encoding used on big-endian
// platforms complicates things. Note that Chromium is not officially
// supported on any big-endian architecture either.
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && \
    defined(ARCH_CPU_LITTLE_ENDIAN)
#define PA_HAS_FREELIST_HARDENING
#endif

// Specifies whether allocation extras need to be added.
#if DCHECK_IS_ON() || BUILDFLAG(USE_BACKUP_REF_PTR)
#define PA_EXTRAS_REQUIRED
#endif

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
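Editor's note: the PA_HAS_FAST_MUTEX comment above describes the spin-then-park idea. The following is not PartitionAlloc's lock implementation, just a generic Linux-only sketch of that mechanism (a classic Drepper-style futex lock) to make it concrete; every name in it is invented:

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

class SpinThenFutexLock {
 public:
  void Acquire() {
    // Fast path: a bounded number of userspace CAS attempts first.
    for (int i = 0; i < 64; ++i) {
      int expected = 0;
      if (state_.compare_exchange_weak(expected, 1, std::memory_order_acquire))
        return;
    }
    // Contended path: advertise a waiter (state 2) and sleep in the kernel
    // until woken, then retry.
    while (state_.exchange(2, std::memory_order_acquire) != 0)
      syscall(SYS_futex, &state_, FUTEX_WAIT_PRIVATE, 2, nullptr, nullptr, 0);
  }

  void Release() {
    // Only enter the kernel if someone might be sleeping.
    if (state_.exchange(0, std::memory_order_release) == 2)
      syscall(SYS_futex, &state_, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
  }

 private:
  std::atomic<int> state_{0};  // 0: free, 1: held, 2: held, maybe contended.
};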
@ -0,0 +1,343 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

#include <limits.h>
#include <cstddef>

#include <algorithm>

#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "build/build_config.h"

#if defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
#endif

namespace base {

// Underlying partition storage pages (`PartitionPage`s) are a power-of-2
// size. It is typical for a `PartitionPage` to be based on multiple system
// pages. Most references to "page" refer to `PartitionPage`s.
//
// *Super pages* are the underlying system allocations we make. Super pages
// contain multiple partition pages and include space for a small amount of
// metadata per partition page.
//
// Inside super pages, we store *slot spans*. A slot span is a contiguous
// range of one or more `PartitionPage`s that stores allocations of the same
// size. Slot span sizes are adjusted depending on the allocation size, to
// make sure the packing does not lead to unused (wasted) space at the end of
// the last system page of the span. For our current maximum slot span size of
// 64 KiB and other constant values, we pack _all_ `PartitionRoot::Alloc`
// sizes perfectly up against the end of a system page.

#if defined(_MIPS_ARCH_LOONGSON)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return 16;  // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif defined(OS_APPLE) && defined(ARCH_CPU_64_BITS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return vm_page_shift + 2;
}
#else
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return 14;  // 16 KiB
}
#endif
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageSize() {
  return 1 << PartitionPageShift();
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageOffsetMask() {
  return PartitionPageSize() - 1;
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageBaseMask() {
  return ~PartitionPageOffsetMask();
}

// Number of system pages per regular slot span. Above this limit, we call it
// a single-slot span, as the span literally hosts only one slot, and has a
// somewhat different implementation. At run time, single-slot spans can be
// differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with page size larger than 4 KiB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;

// To avoid fragmentation via never-used freelist entries, we hand out
// partition freelist sections gradually, in units of the dominant system page
// size. What we're actually doing is avoiding filling the full
// `PartitionPage` (16 KiB) with freelist pointers right away. Writing
// freelist pointers would fault and dirty a private page, which is very
// wasteful if we never actually store objects there.

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() {
  return PartitionPageSize() >> SystemPageShift();
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxSystemPagesPerRegularSlotSpan() {
  return NumSystemPagesPerPartitionPage() *
         kMaxPartitionPagesPerRegularSlotSpan;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxRegularSlotSpanSize() {
  return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}

// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as
// well). These chunks are called *super pages*. We do this so that we can
// store metadata in the first few pages of each 2 MiB-aligned section. This
// makes freeing memory very fast. The 2 MiB size & alignment were chosen
// because this virtual address block represents a full but single page table
// allocation on ARM, ia32 and x64, which may be slightly more performance-
// and memory-efficient. (Note, these super pages are backed by 4 KiB system
// pages and have nothing to do with the OS concept of "huge pages"/"large
// pages", even though the size coincides.)
//
// The layout of the super page is as follows. The sizes below are the same
// for 32- and 64-bit platforms.
//
// +-----------------------+
// | Guard page (4 KiB)    |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB)   |
// | *Scan State Bitmap    |
// | Slot span             |
// | Slot span             |
// | ...                   |
// | Slot span             |
// | Guard pages (16 KiB)  |
// +-----------------------+
//
// The *Scan state bitmap is inserted for partitions that may have quarantine
// enabled.
//
// If refcount_at_end_allocation is enabled, a RefcountBitmap (4 KiB) is
// inserted after the metadata page for BackupRefPtr. The guard pages after
// the bitmap will then be 4 KiB:
//
// ...
// | Metadata page (4 KiB) |
// | RefcountBitmap (4 KiB)|
// | Guard pages (4 KiB)   |
// ...
//
// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
// that slot spans of different sizes may co-exist within one super page. Even
// slot spans of the same length may host different slot sizes. However, all
// slots within a span have to be of the same size.
//
// The metadata page has the following format. Note that a `PartitionPage`
// that is not at the head of a slot span is "unused" (for the most part, it
// only stores the offset from the head page). In other words, the metadata
// for a slot span is stored only in the first `PartitionPage` of the slot
// span. Metadata accesses to other `PartitionPage`s are redirected to the
// first `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B)                 |
// | PartitionPage of slot span 1 (32 B, used)   |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 2 (32 B, used)   |
// | PartitionPage of slot span 3 (32 B, used)   |
// | ...                                         |
// | PartitionPage of slot span N (32 B, used)   |
// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
// A direct-mapped page has an identical layout at the beginning to fake it
// looking like a super page:
//
// +---------------------------------+
// | Guard page (4 KiB)              |
// | Metadata page (4 KiB)           |
// | Guard pages (8 KiB)             |
// | Direct mapped object            |
// | Guard page (4 KiB, 32-bit only) |
// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout (on 64-bit
// architectures; on 32-bit ones, the layout is identical, but some sizes are
// different due to smaller pointers):
//
// +----------------------------------+
// | SuperPageExtentEntry (32 B)      |
// | PartitionPage (32 B)             |
// | PartitionBucket (40 B)           |
// | PartitionDirectMapExtent (32 B)  |
// +----------------------------------+
//
// See |PartitionDirectMapMetadata| for details.

constexpr size_t kGiB = 1024 * 1024 * 1024ull;
constexpr size_t kSuperPageShift = 21;  // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;

// GigaCage is split into two pools, one which supports BackupRefPtr (BRP) and
// one that doesn't.
constexpr size_t kNumPools = 2;
#if defined(PA_HAS_64_BITS_POINTERS)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPages = kPoolMaxSize / kSuperPageSize;

static constexpr internal::pool_handle kNonBRPPoolHandle = 1;
static constexpr internal::pool_handle kBRPPoolHandle = 2;

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() {
  return kSuperPageSize >> PartitionPageShift();
}

constexpr ALWAYS_INLINE size_t MaxSuperPages() {
  return kMaxSuperPages;
}

#if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is the super page
// size, because this is the reservation granularity of the GigaCage.
constexpr ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
  return kSuperPageSize;
}

constexpr ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
  return kSuperPageShift;
}
#else   // defined(PA_HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space
// allocation unit. However, don't go below the partition page size, so that
// GigaCage bitmaps don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularity() {
  return std::max(PageAllocationGranularity(), PartitionPageSize());
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() {
  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif  // defined(PA_HAS_64_BITS_POINTERS)

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() {
  return DirectMapAllocationGranularity() - 1;
}

// The "order" of an allocation is closely related to the power-of-2 size of
// the allocation. More precisely, the order is the bit index of the
// most-significant bit in the allocation size, where the bit numbering starts
// at index 1 for the least-significant bit.
//
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
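Editor's note: the "order" definition above can be made concrete with a small worked example. The helper below is hypothetical, purely to verify the ranges listed in the comment:

#include <cstddef>

// Hypothetical helper, not part of the allocator: bit index of the MSB,
// counting from 1 at the LSB (so OrderOf(0) == 0).
constexpr size_t OrderOf(size_t size) {
  size_t order = 0;
  while (size) {
    size >>= 1;
    ++order;
  }
  return order;
}

static_assert(OrderOf(0) == 0, "order 0 covers 0");
static_assert(OrderOf(1) == 1, "order 1 covers 1");
static_assert(OrderOf(2) == 2 && OrderOf(3) == 2, "order 2 covers 2->3");
static_assert(OrderOf(4) == 3 && OrderOf(7) == 3, "order 3 covers 4->7");
static_assert(OrderOf(8) == 4 && OrderOf(15) == 4, "order 4 covers 8->15");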
// PartitionAlloc should return memory properly aligned for any type, to
// behave properly as a generic allocator. This is not strictly required as
// long as types are explicitly allocated with PartitionAlloc, but is needed
// to use it as a malloc() implementation, and generally to match malloc()'s
// behavior.
//
// In practice, this means 8-byte alignment on 32-bit architectures, and
// 16-byte alignment on 64-bit ones.
//
// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
constexpr size_t kMinBucketedOrder =
    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
constexpr size_t kMaxBucketedOrder = 20;
constexpr size_t kNumBucketedOrders =
    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
// 4 buckets per order (for the higher orders).
constexpr size_t kNumBucketsPerOrderBits = 2;
constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
constexpr size_t kMaxBucketSpacing =
    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
// Limit when downsizing a direct mapping using `realloc`:
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2 GiB to make sure that a 2 GiB allocation
// fails. This is a security choice in Chrome, to help make size_t vs. int
// bugs harder to exploit.
//
// There are matching limits in other allocators, such as tcmalloc. See
// crbug.com/998048 for details.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxDirectMapped() {
  // Subtract kSuperPageSize to accommodate for the granularity inside
  // PartitionRoot::GetDirectMapReservationSize.
  return (1UL << 31) - kSuperPageSize;
}

// Max alignment supported by AlignedAllocFlags().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;

constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;

// Constant for the memory reclaim logic.
constexpr size_t kMaxFreeableSpans = 16;

// If the total size in bytes of allocated but not committed pages exceeds
// this value (which probably indicates an "out of virtual address space"
// crash), a special crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish
// "out of virtual address space" from "out of physical memory" in crash
// reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB

// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;

constexpr unsigned char kQuarantinedByte = 0xEF;

// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;

// Flags for `PartitionAllocFlags`.
enum PartitionAllocFlags {
  PartitionAllocReturnNull = 1 << 0,
  PartitionAllocZeroFill = 1 << 1,
  PartitionAllocNoHooks = 1 << 2,  // Internal only.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note this makes all large
  // allocations return nullptr, such as direct-mapped ones, and even for
  // smaller ones, a nullptr value is common.
  PartitionAllocFastPathOrReturnNull = 1 << 3,  // Internal only.

  PartitionAllocLastFlag = PartitionAllocFastPathOrReturnNull
};

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
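Editor's note: the bucket arithmetic above can be spelled out as a cross-check, assuming a 64-bit build where kAlignment == 16 and hence kMinBucketedOrder == 5; this fragment is illustrative, not part of the source:

// kSmallestBucket   = 1 << (5 - 1)           = 16 bytes
// kMaxBucketSpacing = 1 << ((20 - 1) - 2)    = 131072 (128 KiB)
// kMaxBucketed      = (1 << 19) + 3 * 131072 = 524288 + 393216
//                   = 917504 bytes (896 KiB)
static_assert(base::kMaxBucketed == 917504,
              "matches the static_assert in PartitionAllocGlobalInit()");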
@ -0,0 +1,66 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc_features.h"

#include "base/feature_list.h"

namespace base {
namespace features {

#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};
#endif  // defined(PA_ALLOW_PCSCAN)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
    "PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
const Feature kPartitionAllocBackupRefPtrControl{
    "PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};

// Use a larger maximum thread cache cacheable bucket size.
const Feature kPartitionAllocLargeThreadCacheSize{
    "PartitionAllocLargeThreadCacheSize", FEATURE_DISABLED_BY_DEFAULT};

#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

const Feature kPartitionAllocLazyCommit{"PartitionAllocLazyCommit",
                                        FEATURE_ENABLED_BY_DEFAULT};

// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does
// not affect whether PCScan is enabled itself.
const Feature kPartitionAllocPCScanMUAwareScheduler{
    "PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};

// If enabled, PCScan unconditionally frees all quarantined objects.
// This is a performance testing feature.
const Feature kPartitionAllocPCScanImmediateFreeing{
    "PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};

// If enabled, PCScan clears eagerly (synchronously) on free().
const Feature kPartitionAllocPCScanEagerClearing{
    "PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};

// In addition to the heap, also scan the stack of the current mutator.
const Feature kPartitionAllocPCScanStackScanning {
  "PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
      FEATURE_ENABLED_BY_DEFAULT
#else
      FEATURE_DISABLED_BY_DEFAULT
#endif  // defined(PA_PCSCAN_STACK_SUPPORTED)
};

const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
                                    FEATURE_DISABLED_BY_DEFAULT};

}  // namespace features
}  // namespace base
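Editor's note: these Feature objects are consulted through the standard base::FeatureList API. A sketch of a call site (the wrapper function is hypothetical; FeatureList::IsEnabled is the real entry point):

#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/feature_list.h"

bool ShouldStartPCScan() {
#if defined(PA_ALLOW_PCSCAN)
  // Field-trial or command-line state decides the returned value.
  return base::FeatureList::IsEnabled(base::features::kPartitionAllocPCScan);
#else
  return false;
#endif
}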
@ -0,0 +1,39 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"

namespace base {

struct Feature;

namespace features {

#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif  // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
extern const BASE_EXPORT Feature kPartitionAllocDCScan;
extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;

extern const BASE_EXPORT Feature kPartitionAllocLazyCommit;

}  // namespace features
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
@ -0,0 +1,96 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_

#include <algorithm>
#include <cstddef>

#include "base/allocator/buildflags.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"

namespace base {

// Alignment has two constraints:
// - Alignment requirement for scalar types: alignof(std::max_align_t)
// - Alignment requirement for operator new().
//
// The two are separate on Windows 64 bits, where the first one is 8 bytes,
// and the second one 16. We could technically return something different for
// malloc() and operator new(), but this would complicate things, and most of
// our allocations are presumably coming from operator new() anyway.
//
// __STDCPP_DEFAULT_NEW_ALIGNMENT__ is C++17. As such, it is not defined on
// all platforms, as Chrome's requirement is C++14 as of 2020.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
constexpr size_t kAlignment =
    std::max(alignof(max_align_t), __STDCPP_DEFAULT_NEW_ALIGNMENT__);
#else
constexpr size_t kAlignment = alignof(max_align_t);
#endif
static_assert(kAlignment <= 16,
              "PartitionAlloc doesn't support a fundamental alignment larger "
              "than 16 bytes.");

namespace internal {

template <bool thread_safe>
struct SlotSpanMetadata;

constexpr bool ThreadSafe = true;
constexpr bool NotThreadSafe = false;

#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
BASE_EXPORT void CheckThatSlotOffsetIsZero(void*);
#endif

}  // namespace internal

template <bool thread_safe>
struct PartitionRoot;

using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
using ThreadUnsafePartitionRoot = PartitionRoot<internal::NotThreadSafe>;

class PartitionStatsDumper;

}  // namespace base

// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
//
// The malloc attribute indicates that the function acts like a system memory
// allocation function, returning a pointer to allocated storage disjoint from
// the storage for any other object accessible to the caller.
//
// Note that it doesn't apply to realloc()-type functions, as they can return
// the same pointer as the one passed as a parameter, as noted in e.g.
// stdlib.h on Linux systems.
#if defined(__has_attribute)

#if __has_attribute(malloc)
#define MALLOC_FN __attribute__((malloc))
#endif

// Allows the compiler to assume that the return value is aligned on a
// kAlignment boundary. This is useful for e.g. using aligned vector
// instructions in the constructor for zeroing.
#if __has_attribute(assume_aligned)
#define MALLOC_ALIGNED __attribute__((assume_aligned(base::kAlignment)))
#endif

#endif  // defined(__has_attribute)

#if !defined(MALLOC_FN)
#define MALLOC_FN
#endif

#if !defined(MALLOC_ALIGNED)
#define MALLOC_ALIGNED
#endif

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
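Editor's note: the MALLOC_FN / MALLOC_ALIGNED macros above are meant to annotate allocation-function declarations. A hypothetical example of their application (the real annotated declarations live in the allocator headers):

#include <cstddef>

#include "base/allocator/partition_allocator/partition_alloc_forward.h"

// Expands to __attribute__((malloc)) and
// __attribute__((assume_aligned(base::kAlignment))) where supported, and to
// nothing elsewhere.
MALLOC_FN MALLOC_ALIGNED void* AllocateChunk(size_t size);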
117
src/base/allocator/partition_allocator/partition_alloc_hooks.cc
Normal file
117
src/base/allocator/partition_allocator/partition_alloc_hooks.cc
Normal file
@ -0,0 +1,117 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_alloc_hooks.h"

#include <ostream>

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"

namespace base {

Lock& GetHooksLock() {
  static NoDestructor<Lock> lock;
  return *lock;
}

std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
    PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
    PartitionAllocHooks::free_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::AllocationOverrideHook*>
    PartitionAllocHooks::allocation_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeOverrideHook*>
    PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
    PartitionAllocHooks::realloc_override_hook_(nullptr);

void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
                                           FreeObserverHook* free_hook) {
  AutoLock guard(GetHooksLock());

  // Chained hooks are not supported. Registering a non-null hook when a
  // non-null hook is already registered indicates somebody is trying to
  // overwrite a hook.
  PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
           (!alloc_hook && !free_hook))
      << "Overwriting already set observer hooks";
  allocation_observer_hook_ = alloc_hook;
  free_observer_hook_ = free_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                           FreeOverrideHook* free_hook,
                                           ReallocOverrideHook realloc_hook) {
  AutoLock guard(GetHooksLock());

  PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
            !realloc_override_hook_) ||
           (!alloc_hook && !free_hook && !realloc_hook))
      << "Overwriting already set override hooks";
  allocation_override_hook_ = alloc_hook;
  free_override_hook_ = free_hook;
  realloc_override_hook_ = realloc_hook;

  hooks_enabled_ = allocation_observer_hook_ || allocation_override_hook_;
}

void PartitionAllocHooks::AllocationObserverHookIfEnabled(
    void* address,
    size_t size,
    const char* type_name) {
  if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
    hook(address, size, type_name);
}

bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
    void** out,
    int flags,
    size_t size,
    const char* type_name) {
  if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
    return hook(out, flags, size, type_name);
  return false;
}

void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
  if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
    hook(address);
}

bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
  if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
    return hook(address);
  return false;
}

void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
                                                       void* new_address,
                                                       size_t size,
                                                       const char* type_name) {
  // Report a reallocation as a free followed by an allocation.
  AllocationObserverHook* allocation_hook =
      allocation_observer_hook_.load(std::memory_order_relaxed);
  FreeObserverHook* free_hook =
      free_observer_hook_.load(std::memory_order_relaxed);
  if (allocation_hook && free_hook) {
    free_hook(old_address);
    allocation_hook(new_address, size, type_name);
  }
}

bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
                                                       void* address) {
  if (ReallocOverrideHook* hook =
          realloc_override_hook_.load(std::memory_order_relaxed)) {
    return hook(out, address);
  }
  return false;
}

}  // namespace base
@ -0,0 +1,85 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_

#include <atomic>
#include <cstddef>

#include "base/base_export.h"

namespace base {

// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur, as well as 'override' hooks that allow overriding those operations.
class BASE_EXPORT PartitionAllocHooks {
 public:
  // Log allocation and free events.
  typedef void AllocationObserverHook(void* address,
                                      size_t size,
                                      const char* type_name);
  typedef void FreeObserverHook(void* address);

  // If it returns true, the allocation has been overridden with the pointer
  // in *out.
  typedef bool AllocationOverrideHook(void** out,
                                      int flags,
                                      size_t size,
                                      const char* type_name);
  // If it returns true, then the allocation was overridden and has been
  // freed.
  typedef bool FreeOverrideHook(void* address);
  // If it returns true, the underlying allocation is overridden and *out
  // holds the size of the underlying allocation.
  typedef bool ReallocOverrideHook(size_t* out, void* address);

  // To unhook, call Set*Hooks with nullptrs.
  static void SetObserverHooks(AllocationObserverHook* alloc_hook,
                               FreeObserverHook* free_hook);
  static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                               FreeOverrideHook* free_hook,
                               ReallocOverrideHook realloc_hook);

  // Helper method to check whether hooks are enabled. This is an optimization
  // so that if a function needs to call observer and override hooks in two
  // different places, this value can be cached and only loaded once.
  static bool AreHooksEnabled() {
    return hooks_enabled_.load(std::memory_order_relaxed);
  }

  static void AllocationObserverHookIfEnabled(void* address,
                                              size_t size,
                                              const char* type_name);
  static bool AllocationOverrideHookIfEnabled(void** out,
                                              int flags,
                                              size_t size,
                                              const char* type_name);

  static void FreeObserverHookIfEnabled(void* address);
  static bool FreeOverrideHookIfEnabled(void* address);

  static void ReallocObserverHookIfEnabled(void* old_address,
                                           void* new_address,
                                           size_t size,
                                           const char* type_name);
  static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);

 private:
  // Single bool that is used to indicate whether observer or allocation hooks
  // are set, to reduce the number of loads required to check whether hooking
  // is enabled.
  static std::atomic<bool> hooks_enabled_;

  // Lock used to synchronize Set*Hooks calls.
  static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
  static std::atomic<FreeObserverHook*> free_observer_hook_;

  static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
  static std::atomic<FreeOverrideHook*> free_override_hook_;
  static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_
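Editor's note: a minimal observer pair wired through the SetObserverHooks() entry point declared above; the counter and function names are invented for illustration:

#include <atomic>
#include <cstddef>

#include "base/allocator/partition_allocator/partition_alloc_hooks.h"

namespace {
std::atomic<size_t> g_live_allocations{0};

void OnAlloc(void* address, size_t size, const char* type_name) {
  g_live_allocations.fetch_add(1, std::memory_order_relaxed);
}

void OnFree(void* address) {
  g_live_allocations.fetch_sub(1, std::memory_order_relaxed);
}
}  // namespace

void InstallAllocationCounters() {
  // Chained hooks are not supported; this must be the only registration.
  base::PartitionAllocHooks::SetObserverHooks(&OnAlloc, &OnFree);
}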
@ -0,0 +1,54 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/logging_buildflags.h"

// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally (cf.
// base/allocator/partition_allocator/partition_alloc_check.h).
// So PA_NOTREACHED() uses PA_DCHECK() instead of DCHECK().

#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
#define PA_NOTREACHED()                                                      \
  true ? logging::RawError(__FILE__                                          \
                           "(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
       : EAT_CHECK_STREAM_PARAMS()

#elif BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && defined(OFFICIAL_BUILD) && \
    defined(NDEBUG) && DCHECK_IS_ON()

// PA_DCHECK(condition) is PA_CHECK(condition) if DCHECK_IS_ON().
// When BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC), OFFICIAL_BUILD and NDEBUG
// are defined, PA_CHECK(false) is IMMEDIATE_CRASH(). Since IMMEDIATE_CRASH()
// hints __builtin_unreachable() to the compiler, the following code causes a
// compile failure:
//   switch (...) {
//     ...
//     case X:
//       PA_DCHECK(false);
//       FALLTHROUGH;  // The compiler knows "not reached".
//     case Y:
//       ...
// So define PA_NOTREACHED() by using the async-signal-safe RawCheck().
#define PA_NOTREACHED()                                                    \
  UNLIKELY(true)                                                           \
      ? logging::RawCheck(__FILE__                                         \
                          "(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
      : EAT_CHECK_STREAM_PARAMS()

#else

// PA_CHECK() uses RawCheck() for error reporting. So "PA_DCHECK(false);
// FALLTHROUGH;" doesn't cause a compile failure.
#define PA_NOTREACHED() PA_DCHECK(false)

#endif

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_NOTREACHED_H_
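Editor's note: a usage sketch mirroring the switch/FALLTHROUGH scenario described in the header's comments; the enum-like values and the function are hypothetical:

#include "base/allocator/partition_allocator/partition_alloc_notreached.h"

int HandleMode(int mode) {
  switch (mode) {
    case 0:
      return 1;
    case 1:
      return 2;
    default:
      // Depending on the build configuration, this is a RawCheck-based crash
      // or a PA_DCHECK; either way it avoids (D)CHECK's allocation problem.
      PA_NOTREACHED();
      return 0;
  }
}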
970
src/base/allocator/partition_allocator/partition_bucket.cc
Normal file
970
src/base/allocator/partition_allocator/partition_bucket.cc
Normal file
@ -0,0 +1,970 @@
|
||||
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_bucket.h"
|
||||
|
||||
#include "base/allocator/buildflags.h"
|
||||
#include "base/allocator/partition_allocator/address_pool_manager.h"
|
||||
#include "base/allocator/partition_allocator/oom.h"
|
||||
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_address_space.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_features.h"
|
||||
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
|
||||
#include "base/allocator/partition_allocator/partition_oom.h"
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
|
||||
#include "base/bits.h"
|
||||
#include "base/check.h"
|
||||
#include "base/debug/alias.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
template <bool thread_safe>
|
||||
[[noreturn]] NOINLINE void PartitionOutOfMemoryMappingFailure(
|
||||
PartitionRoot<thread_safe>* root,
|
||||
size_t size) LOCKS_EXCLUDED(root->lock_) {
|
||||
NO_CODE_FOLDING();
|
||||
root->OutOfMemory(size);
|
||||
IMMEDIATE_CRASH(); // Not required, kept as documentation.
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
[[noreturn]] NOINLINE void PartitionOutOfMemoryCommitFailure(
|
||||
PartitionRoot<thread_safe>* root,
|
||||
size_t size) LOCKS_EXCLUDED(root->lock_) {
|
||||
NO_CODE_FOLDING();
|
||||
root->OutOfMemory(size);
|
||||
IMMEDIATE_CRASH(); // Not required, kept as documentation.
|
||||
}
|
||||
|
||||
#if !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||
// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
|
||||
// that a partial super page is allowed at the end. Since the block list uses
|
||||
// kSuperPageSize granularity, a partial super page is considered blocked if
|
||||
// there is a raw_ptr<T> pointing anywhere in that super page, even if doesn't
|
||||
// point to that partially allocated region.
|
||||
bool AreAllowedSuperPagesForBRPPool(const char* start, const char* end) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(start) % kSuperPageSize));
|
||||
for (const char* super_page = start; super_page < end;
|
||||
super_page += kSuperPageSize) {
|
||||
// If any blocked superpage is found inside the given memory region,
|
||||
// the memory region is blocked.
|
||||
if (!AddressPoolManagerBitmap::IsAllowedSuperPageForBRPPool(super_page))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#endif // !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||
|
||||
// Reserves |requested_size| worth of super pages from the specified pool of the
|
||||
// GigaCage. If BRP pool is requested this function will honor BRP block list.
|
||||
//
|
||||
// The returned pointer will be aligned to kSuperPageSize, and so
|
||||
// |requested_address| should be. |requested_size| doesn't have to be, however.
|
||||
//
|
||||
// |requested_address| is merely a hint, which will be attempted, but easily
|
||||
// given up on if doesn't work the first time.
|
||||
//
|
||||
// The function doesn't need to hold root->lock_ or any other locks, because:
|
||||
// - It (1) reserves memory, (2) then consults AreAllowedSuperPagesForBRPPool
|
||||
// for that memory, and (3) returns the memory if
|
||||
// allowed, or unreserves and decommits if not allowed. So no other
|
||||
// overlapping region can be allocated while executing
|
||||
// AreAllowedSuperPagesForBRPPool.
|
||||
// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
|
||||
// designed to not need locking.
|
||||
char* ReserveMemoryFromGigaCage(pool_handle pool,
|
||||
void* requested_address,
|
||||
size_t requested_size) {
|
||||
PA_DCHECK(!(reinterpret_cast<uintptr_t>(requested_address) % kSuperPageSize));
|
||||
|
||||
char* ptr = AddressPoolManager::GetInstance()->Reserve(
|
||||
pool, requested_address, requested_size);
|
||||
|
||||
// In 32-bit mode, when allocating from BRP pool, verify that the requested
|
||||
// allocation honors the block list. Find a better address otherwise.
|
||||
#if !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||
if (pool == GetBRPPool()) {
|
||||
constexpr int kMaxRandomAddressTries = 10;
|
||||
for (int i = 0; i < kMaxRandomAddressTries; ++i) {
|
||||
if (!ptr || AreAllowedSuperPagesForBRPPool(ptr, ptr + requested_size))
|
||||
break;
|
||||
AddressPoolManager::GetInstance()->UnreserveAndDecommit(pool, ptr,
|
||||
requested_size);
|
||||
// No longer try to honor |requested_address|, because it didn't work for
|
||||
// us last time.
|
||||
ptr = AddressPoolManager::GetInstance()->Reserve(pool, nullptr,
|
||||
requested_size);
|
||||
}
|
||||
|
||||
// If the allocation attempt succeeds, we will break out of the following
|
||||
// loop immediately.
|
||||
//
|
||||
// Last resort: sequentially scan the whole 32-bit address space. The number
|
||||
// of blocked super-pages should be very small, so we expect to practically
|
||||
// never need to run the following code. Note that it may fail to find an
|
||||
// available page, e.g., when it becomes available after the scan passes
|
||||
// through it, but we accept the risk.
|
||||
for (uintptr_t ptr_to_try = kSuperPageSize; ptr_to_try != 0;
|
||||
ptr_to_try += kSuperPageSize) {
|
||||
if (!ptr || AreAllowedSuperPagesForBRPPool(ptr, ptr + requested_size))
|
||||
break;
|
||||
AddressPoolManager::GetInstance()->UnreserveAndDecommit(pool, ptr,
|
||||
requested_size);
|
||||
// Reserve() can return a different pointer than attempted.
|
||||
ptr = AddressPoolManager::GetInstance()->Reserve(
|
||||
pool, reinterpret_cast<void*>(ptr_to_try), requested_size);
|
||||
}
|
||||
|
||||
// If the loop ends naturally, the last allocated region hasn't been
|
||||
// verified. Do it now.
|
||||
if (ptr && !AreAllowedSuperPagesForBRPPool(ptr, ptr + requested_size)) {
|
||||
AddressPoolManager::GetInstance()->UnreserveAndDecommit(pool, ptr,
|
||||
requested_size);
|
||||
ptr = nullptr;
|
||||
}
|
||||
}
|
||||
#endif // !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
|
||||
|
||||
#if !defined(PA_HAS_64_BITS_POINTERS)
|
||||
// Only mark the region as belonging to the pool after it has passed the
|
||||
// blocklist check in order to avoid a potential race with destructing a
|
||||
// raw_ptr<T> object that points to non-PA memory in another thread.
|
||||
// If `MarkUsed` was called earlier, the other thread could incorrectly
|
||||
// determine that the allocation had come form PartitionAlloc.
|
||||
  if (ptr)
    AddressPoolManager::GetInstance()->MarkUsed(pool, ptr, requested_size);
#endif

  PA_DCHECK(!(reinterpret_cast<uintptr_t>(ptr) % kSuperPageSize));
  return ptr;
}

template <bool thread_safe>
SlotSpanMetadata<thread_safe>* PartitionDirectMap(
    PartitionRoot<thread_safe>* root,
    int flags,
    size_t raw_size,
    size_t slot_span_alignment) {
  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
            bits::IsPowerOfTwo(slot_span_alignment));

  // No static EXCLUSIVE_LOCKS_REQUIRED(), as the checker doesn't understand
  // scoped unlocking.
  root->lock_.AssertAcquired();

  const bool return_null = flags & PartitionAllocReturnNull;
  if (UNLIKELY(raw_size > MaxDirectMapped())) {
    if (return_null)
      return nullptr;

    // The lock is here to protect PA from:
    // 1. Concurrent calls
    // 2. Reentrant calls
    //
    // This is fine here however, as:
    // 1. Concurrency: |PartitionRoot::OutOfMemory()| never returns, so the lock
    //    will not be re-acquired, which would lead to acting on inconsistent
    //    data that could have been modified in-between releasing and acquiring
    //    it.
    // 2. Reentrancy: This is why we release the lock. On some platforms,
    //    terminating the process may free() memory, or even possibly try to
    //    allocate some. Calling free() is fine, but will deadlock since
    //    |PartitionRoot::lock_| is not recursive.
    //
    // Supporting reentrant calls properly is hard, and not a requirement for
    // PA. However up to that point, we've only *read* data, not *written* to
    // any state. Reentrant calls are then fine, especially as we don't continue
    // on this path. The only downside is possibly endless recursion if the OOM
    // handler allocates and fails to use UncheckedMalloc() or equivalent, but
    // that's violating the contract of base::TerminateBecauseOutOfMemory().
    ScopedUnlockGuard<thread_safe> unlock{root->lock_};
    PartitionExcessiveAllocationSize(raw_size);
  }

  PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
  PartitionPage<thread_safe>* page = nullptr;

  {
    // Getting memory for direct-mapped allocations doesn't interact with the
    // rest of the allocator, but takes a long time, as it involves several
    // system calls. With GigaCage, no mmap() (or equivalent) call is made on 64
    // bit systems, but page permissions are changed with mprotect(), which is a
    // syscall.
    //
    // These calls are almost always slow (at least a couple us per syscall on a
    // desktop Linux machine), and they also have a very long latency tail,
    // possibly from getting descheduled. As a consequence, we should not hold
    // the lock when performing a syscall. This is not the only problematic
    // location, but since this one doesn't interact with the rest of the
    // allocator, we can safely drop and then re-acquire the lock.
    //
    // Note that this only affects allocations that are not served out of the
    // thread cache, but as a simple example the buffer partition in blink is
    // frequently used for large allocations (e.g. ArrayBuffer), and frequent,
    // small ones (e.g. WTF::String), and does not have a thread cache.
    ScopedUnlockGuard<thread_safe> scoped_unlock{root->lock_};

    const size_t slot_size =
        PartitionRoot<thread_safe>::GetDirectMapSlotSize(raw_size);
    // The super page starts with a partition page worth of metadata and guard
    // pages, hence alignment requests == PartitionPageSize() will be
    // automatically satisfied. Padding is needed for higher-order alignment
    // requests. Note, |slot_span_alignment| is at least 1 partition page.
    const size_t padding_for_alignment =
        slot_span_alignment - PartitionPageSize();
    const size_t reservation_size =
        PartitionRoot<thread_safe>::GetDirectMapReservationSize(
            raw_size + padding_for_alignment);
#if DCHECK_IS_ON()
    const size_t available_reservation_size =
        reservation_size - padding_for_alignment -
        PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
    PA_DCHECK(slot_size <= available_reservation_size);
#endif

    // Allocate from GigaCage. Route to the appropriate GigaCage pool based on
    // BackupRefPtr support.
    pool_handle pool = root->ChooseGigaCagePool();
    char* reservation_start =
        ReserveMemoryFromGigaCage(pool, nullptr, reservation_size);
    if (UNLIKELY(!reservation_start)) {
      if (return_null)
        return nullptr;

      PartitionOutOfMemoryMappingFailure(root, reservation_size);
    }

    root->total_size_of_direct_mapped_pages.fetch_add(
        reservation_size, std::memory_order_relaxed);

    // Shift by 1 partition page (metadata + guard pages) and alignment padding.
    char* const slot_start =
        reservation_start + PartitionPageSize() + padding_for_alignment;
    RecommitSystemPages(
        reservation_start + SystemPageSize(),
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
        // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is used,
        // allocate 2 SystemPages, one for SuperPage metadata and the other for
        // RefCount "bitmap" (only one of its elements will be used).
        (pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
#else
        SystemPageSize(),
#endif
        PageReadWrite, PageUpdatePermissions);

    // No need to hold root->lock_. Now that memory is reserved, no other
    // overlapping region can be allocated (because of how GigaCage works),
    // so no other thread can update the same offset table entries at the
    // same time. Furthermore, nobody will be reading these offsets until this
    // function returns.
    uintptr_t ptr_start = reinterpret_cast<uintptr_t>(reservation_start);
    uintptr_t ptr_end = ptr_start + reservation_size;
    auto* offset_ptr = ReservationOffsetPointer(ptr_start);
int offset = 0;
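    // Illustrative note (an assumption, not from the original source): entry
    // k of the offset table stores k, so that given any address inside the
    // reservation, the reservation start can presumably be recovered as
    // address - offset * kSuperPageSize.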
    while (ptr_start < ptr_end) {
      PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(ptr_start));
      PA_DCHECK(offset < kOffsetTagNormalBuckets);
      *offset_ptr++ = offset++;
      ptr_start += kSuperPageSize;
    }

    auto* super_page_extent =
        reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
            PartitionSuperPageToMetadataArea(reservation_start));
    super_page_extent->root = root;
    // The new structures are all located inside a fresh system page so they
    // will all be zeroed out. These DCHECKs are for documentation and to assert
    // our expectations of the kernel.
    PA_DCHECK(!super_page_extent->number_of_consecutive_super_pages);
    PA_DCHECK(!super_page_extent->next);

    PartitionPage<thread_safe>* first_page =
        reinterpret_cast<PartitionPage<thread_safe>*>(super_page_extent) + 1;
    page = PartitionPage<thread_safe>::FromPtr(slot_start);
    // |first_page| and |page| may be equal, if there is no alignment padding.
    if (page != first_page) {
      PA_DCHECK(page > first_page);
      PA_DCHECK(page - first_page <=
                PartitionPage<thread_safe>::kMaxSlotSpanMetadataOffset);
      PA_CHECK(!first_page->is_valid);
      first_page->has_valid_span_after_this = true;
      first_page->slot_span_metadata_offset = page - first_page;
    }
    auto* metadata =
        reinterpret_cast<PartitionDirectMapMetadata<thread_safe>*>(page);
    // Since direct map metadata is larger than PartitionPage, make sure the
    // first and the last bytes are on the same system page, i.e. within the
    // super page metadata region.
    PA_DCHECK(
        bits::AlignDown(reinterpret_cast<char*>(metadata), SystemPageSize()) ==
        bits::AlignDown(reinterpret_cast<char*>(metadata) +
                            sizeof(PartitionDirectMapMetadata<thread_safe>) - 1,
                        SystemPageSize()));
    PA_DCHECK(page == &metadata->page);
    page->is_valid = true;
    PA_DCHECK(!page->has_valid_span_after_this);
    PA_DCHECK(!page->slot_span_metadata_offset);
    PA_DCHECK(!page->slot_span_metadata.next_slot_span);
    PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
    PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
    PA_DCHECK(!page->slot_span_metadata.empty_cache_index);

    PA_DCHECK(!metadata->subsequent_page.subsequent_page_metadata.raw_size);
    // Raw size is set later, by the caller.
    metadata->subsequent_page.slot_span_metadata_offset = 1;

    PA_DCHECK(!metadata->bucket.active_slot_spans_head);
    PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
    PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
    PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
    PA_DCHECK(!metadata->bucket.num_full_slot_spans);
    metadata->bucket.slot_size = slot_size;

    new (&page->slot_span_metadata)
        SlotSpanMetadata<thread_safe>(&metadata->bucket);

    // It is typically possible to map a large range of inaccessible pages, and
    // this is leveraged in multiple places, including the GigaCage. However,
    // this doesn't mean that we can commit all this memory. For the vast
    // majority of allocations, this just means that we crash in a slightly
    // different place, but for callers ready to handle failures, we have to
    // return nullptr. See crbug.com/1187404.
    //
    // Note that we didn't check above, because if we cannot even commit a
    // single page, then this is likely hopeless anyway, and we will crash very
    // soon.
    const bool ok = root->TryRecommitSystemPagesForData(slot_start, slot_size,
                                                        PageUpdatePermissions);
    if (!ok) {
      if (!return_null) {
        PartitionOutOfMemoryCommitFailure(root, slot_size);
      }

#if !defined(PA_HAS_64_BITS_POINTERS)
      AddressPoolManager::GetInstance()->MarkUnused(pool, reservation_start,
                                                    reservation_size);
#endif
      AddressPoolManager::GetInstance()->UnreserveAndDecommit(
          pool, reservation_start, reservation_size);

      root->total_size_of_direct_mapped_pages.fetch_sub(
          reservation_size, std::memory_order_relaxed);

      return nullptr;
    }

    auto* next_entry = new (slot_start) PartitionFreelistEntry();
    page->slot_span_metadata.SetFreelistHead(next_entry);

    map_extent = &metadata->direct_map_extent;
    map_extent->reservation_size = reservation_size;
    map_extent->padding_for_alignment = padding_for_alignment;
    map_extent->bucket = &metadata->bucket;
  }

  root->lock_.AssertAcquired();

  // Maintain the doubly-linked list of all direct mappings.
  map_extent->next_extent = root->direct_map_list;
  if (map_extent->next_extent)
    map_extent->next_extent->prev_extent = map_extent;
  map_extent->prev_extent = nullptr;
  root->direct_map_list = map_extent;

  return &page->slot_span_metadata;
}

}  // namespace

// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of NumSystemPagesPerPartitionPage() (aka 4) anyways.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unused pages.
// http://crbug.com/776537
template <bool thread_safe>
uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
  // This works out reasonably for the current bucket sizes of the generic
  // allocator, and the current values of partition page size and constants.
  // Specifically, we have enough room to always pack the slots perfectly into
  // some number of system pages. The only waste is the waste associated with
  // unfaulted pages (i.e. wasted address space).
  // TODO: we end up using a lot of system pages for very small sizes. For
  // example, we'll use 12 system pages for slot size 24. The slot size is
  // so small that the waste would be tiny with just 4, or 1, system pages.
  // Later, we can investigate whether there are anti-fragmentation benefits
// to using fewer system pages.
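  // Illustrative walk-through (hypothetical numbers, not from the original
  // source): with 4 KiB system pages, 4 system pages per partition page, and
  // slot_size == 24, the candidate i == 3 gives page_size == 12288,
  // num_slots == 512 and zero trailing waste; one unfaulted page then adds
  // sizeof(void*) == 8 bytes, for a waste ratio of 8 / 12288.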
  double best_waste_ratio = 1.0f;
  uint16_t best_pages = 0;
  if (slot_size > MaxRegularSlotSpanSize()) {
    // TODO(ajwong): Why is there a DCHECK here for this?
    // http://crbug.com/776537
    PA_DCHECK(!(slot_size % SystemPageSize()));
    best_pages = static_cast<uint16_t>(slot_size >> SystemPageShift());
    PA_CHECK(best_pages <= std::numeric_limits<uint8_t>::max());
    return static_cast<uint8_t>(best_pages);
  }
  PA_DCHECK(slot_size <= MaxRegularSlotSpanSize());
  for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
       i <= MaxSystemPagesPerRegularSlotSpan(); ++i) {
    size_t page_size = i << SystemPageShift();
    size_t num_slots = page_size / slot_size;
    size_t waste = page_size - (num_slots * slot_size);
    // Leaving a page unfaulted is not free; the page will occupy an empty page
    // table entry. Make a simple attempt to account for that.
    //
    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
    // regardless of whether or not they are wasted. Should it just
    // be waste += i * sizeof(void*)?
    // http://crbug.com/776537
    size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
    size_t num_unfaulted_pages =
        num_remainder_pages
            ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
            : 0;
    waste += sizeof(void*) * num_unfaulted_pages;
    double waste_ratio =
        static_cast<double>(waste) / static_cast<double>(page_size);
    if (waste_ratio < best_waste_ratio) {
      best_waste_ratio = waste_ratio;
      best_pages = i;
    }
  }
  PA_DCHECK(best_pages > 0);
  PA_CHECK(best_pages <= MaxSystemPagesPerRegularSlotSpan());
  return static_cast<uint8_t>(best_pages);
}

template <bool thread_safe>
void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
  slot_size = new_slot_size;
slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
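  // Illustrative example (not from the original source): for
  // new_slot_size == 32, this computes (2^42 - 1) / 32 + 1 == 2^37, i.e. 1/32
  // in 42-bit fixed point, so x / 32 becomes (x * 2^37) >> 42.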
  active_slot_spans_head =
      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  empty_slot_spans_head = nullptr;
  decommitted_slot_spans_head = nullptr;
  num_full_slot_spans = 0;
  num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}

template <bool thread_safe>
NOINLINE void PartitionBucket<thread_safe>::OnFull() {
  OOM_CRASH(0);
}

template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
                                               int flags,
                                               size_t slot_span_alignment) {
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
              PartitionPageSize()));
  PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
              PartitionPageSize()));

  size_t num_partition_pages = get_pages_per_slot_span();
  size_t slot_span_reservation_size = num_partition_pages
                                      << PartitionPageShift();
  size_t slot_span_committed_size = get_bytes_per_span();
  PA_DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
  PA_DCHECK(slot_span_committed_size % SystemPageSize() == 0);
  PA_DCHECK(slot_span_committed_size <= slot_span_reservation_size);

  auto adjusted_next_partition_page =
      bits::AlignUp(root->next_partition_page, slot_span_alignment);
  if (UNLIKELY(adjusted_next_partition_page + slot_span_reservation_size >
               root->next_partition_page_end)) {
    // In this case, we can no longer hand out pages from the current super page
    // allocation. Get a new super page.
    if (!AllocNewSuperPage(root, flags)) {
      return nullptr;
    }
    // AllocNewSuperPage() updates root->next_partition_page, re-query.
    adjusted_next_partition_page =
        bits::AlignUp(root->next_partition_page, slot_span_alignment);
    PA_CHECK(adjusted_next_partition_page + slot_span_reservation_size <=
             root->next_partition_page_end);
  }

  auto* gap_start_page =
      PartitionPage<thread_safe>::FromPtr(root->next_partition_page);
  auto* gap_end_page =
      PartitionPage<thread_safe>::FromPtr(adjusted_next_partition_page);
  for (auto* page = gap_start_page; page < gap_end_page; ++page) {
    PA_DCHECK(!page->is_valid);
    page->has_valid_span_after_this = 1;
  }
  root->next_partition_page =
      adjusted_next_partition_page + slot_span_reservation_size;

  void* slot_span_start = adjusted_next_partition_page;
  auto* slot_span = &gap_end_page->slot_span_metadata;
  InitializeSlotSpan(slot_span);
  // Now that slot span is initialized, it's safe to call FromSlotStartPtr.
  PA_DCHECK(slot_span ==
            SlotSpanMetadata<thread_safe>::FromSlotStartPtr(slot_span_start));

  // System pages in the super page come in a decommitted state. Commit them
  // before vending them back.
  // If lazy commit is enabled, pages will be committed when provisioning slots,
  // in ProvisionMoreSlotsAndAllocOne(), not here.
  if (!root->use_lazy_commit) {
    root->RecommitSystemPagesForData(slot_span_start, slot_span_committed_size,
                                     PageUpdatePermissions);
  }

  // Double check that we had enough space in the super page for the new slot
  // span.
  PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
  return slot_span;
}

template <bool thread_safe>
ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSuperPage(
    PartitionRoot<thread_safe>* root,
    int flags) {
  // Need a new super page. We want to allocate super pages in a contiguous
  // address region as much as possible. This is important for not causing
  // page table bloat and not fragmenting address spaces in 32 bit
  // architectures.
  char* requested_address = root->next_super_page;
  // Allocate from GigaCage. Route to the appropriate GigaCage pool based on
  // BackupRefPtr support.
  pool_handle pool = root->ChooseGigaCagePool();
  char* super_page =
      ReserveMemoryFromGigaCage(pool, requested_address, kSuperPageSize);
  if (UNLIKELY(!super_page)) {
    if (flags & PartitionAllocReturnNull)
      return nullptr;

    // Didn't manage to get a new uncommitted super page -> address space issue.
    ScopedUnlockGuard<thread_safe> unlock{root->lock_};
    PartitionOutOfMemoryMappingFailure(root, kSuperPageSize);
  }

  *ReservationOffsetPointer(reinterpret_cast<uintptr_t>(super_page)) =
      kOffsetTagNormalBuckets;

  root->total_size_of_super_pages.fetch_add(kSuperPageSize,
                                            std::memory_order_relaxed);

  root->next_super_page = super_page + kSuperPageSize;
  char* state_bitmap = super_page + PartitionPageSize();
  PA_DCHECK(reinterpret_cast<char*>(SuperPageStateBitmap(super_page)) ==
            state_bitmap);
  const size_t state_bitmap_reservation_size =
      root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
  const size_t state_bitmap_size_to_commit =
      root->IsQuarantineAllowed() ? CommittedStateBitmapSize() : 0;
  PA_DCHECK(state_bitmap_reservation_size % PartitionPageSize() == 0);
  PA_DCHECK(state_bitmap_size_to_commit % SystemPageSize() == 0);
  PA_DCHECK(state_bitmap_size_to_commit <= state_bitmap_reservation_size);
  char* ret = state_bitmap + state_bitmap_reservation_size;
  root->next_partition_page = ret;
  root->next_partition_page_end = root->next_super_page - PartitionPageSize();
  PA_DCHECK(ret ==
            SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
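  // Rough super page layout implied by the pointers computed above (an
  // illustrative sketch, not from the original source): metadata/guard
  // partition page first, then the optional state bitmap, then the payload up
  // to a trailing guard partition page; |ret| points at the payload start.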

  // Keep the first partition page in the super page inaccessible to serve as a
  // guard page, except an "island" in the middle where we put page metadata and
  // also a tiny amount of extent metadata.
  RecommitSystemPages(
      super_page + SystemPageSize(),
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
      // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is used,
      // allocate 2 SystemPages, one for SuperPage metadata and the other for
      // RefCount bitmap.
      (pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
#else
      SystemPageSize(),
#endif
      PageReadWrite, PageUpdatePermissions);

  // If PCScan is used, commit the quarantine bitmap. Otherwise, leave it
  // uncommitted and let PartitionRoot::EnablePCScan commit it when needed.
  if (root->IsQuarantineEnabled()) {
    RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
                        PageReadWrite, PageUpdatePermissions);
    PCScan::RegisterNewSuperPage(root, reinterpret_cast<uintptr_t>(super_page));
  }

  // If we were after a specific address, but didn't get it, assume that
  // the system chose a lousy address. Here most OSes have a default
  // algorithm that isn't randomized. For example, most Linux
  // distributions will allocate the mapping directly before the last
  // successful mapping, which is far from random. So we just get fresh
  // randomness for the next mapping attempt.
  if (requested_address && requested_address != super_page)
    root->next_super_page = nullptr;

  // We allocated a new super page so update super page metadata.
  // First check if this is a new extent or not.
  auto* latest_extent =
      reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
          PartitionSuperPageToMetadataArea(super_page));
  // By storing the root in every extent metadata object, we have a fast way
  // to go from a pointer within the partition to the root object.
  latest_extent->root = root;
  // Most new extents will be part of a larger extent, and these two fields
  // are unused, but we initialize them to 0 so that we get a clear signal
  // in case they are accidentally used.
  latest_extent->number_of_consecutive_super_pages = 0;
  latest_extent->next = nullptr;
  latest_extent->number_of_nonempty_slot_spans = 0;

  PartitionSuperPageExtentEntry<thread_safe>* current_extent =
      root->current_extent;
  const bool is_new_extent = super_page != requested_address;
  if (UNLIKELY(is_new_extent)) {
    if (UNLIKELY(!current_extent)) {
      PA_DCHECK(!root->first_extent);
      root->first_extent = latest_extent;
    } else {
      PA_DCHECK(current_extent->number_of_consecutive_super_pages);
      current_extent->next = latest_extent;
    }
    root->current_extent = latest_extent;
    latest_extent->number_of_consecutive_super_pages = 1;
  } else {
    // We allocated next to an existing extent so just nudge the size up a
    // little.
    PA_DCHECK(current_extent->number_of_consecutive_super_pages);
    ++current_extent->number_of_consecutive_super_pages;
    PA_DCHECK(ret > SuperPagesBeginFromExtent(current_extent) &&
              ret < SuperPagesEndFromExtent(current_extent));
  }
  return ret;
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  new (slot_span) SlotSpanMetadata<thread_safe>(this);
  slot_span->empty_cache_index = -1;

  slot_span->Reset();

  uint16_t num_partition_pages = get_pages_per_slot_span();
  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
  for (uint16_t i = 0; i < num_partition_pages; ++i, ++page) {
    PA_DCHECK(i <= PartitionPage<thread_safe>::kMaxSlotSpanMetadataOffset);
    page->slot_span_metadata_offset = i;
    page->is_valid = true;
  }
}

template <bool thread_safe>
ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
    PartitionRoot<thread_safe>* root,
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span !=
            SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
  uint16_t num_slots = slot_span->num_unprovisioned_slots;
  PA_DCHECK(num_slots);
  // We should only get here when _every_ slot is either used or unprovisioned.
  // (The third state is "on the freelist". If we have a non-empty freelist, we
  // should not get here.)
  PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
  // Similarly, explicitly make sure that the freelist is empty.
  PA_DCHECK(!slot_span->freelist_head);
  PA_DCHECK(slot_span->num_allocated_slots >= 0);

  size_t size = slot_size;
  char* base = reinterpret_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  // If we got here, the first unallocated slot is either partially or fully on
  // an uncommitted page. If the latter, it must be at the start of that page.
  char* return_slot = base + (size * slot_span->num_allocated_slots);
  char* next_slot = return_slot + size;
  char* commit_start = bits::AlignUp(return_slot, SystemPageSize());
  PA_DCHECK(next_slot > commit_start);
  char* commit_end = bits::AlignUp(next_slot, SystemPageSize());
  // If the slot was partially committed, |return_slot| and |next_slot| fall
  // in different pages. If the slot was fully uncommitted, |return_slot| points
  // to the page start and |next_slot| doesn't, thus only the latter gets
  // rounded up.
PA_DCHECK(commit_end > commit_start);
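  // Illustrative example (hypothetical numbers, not from the original
  // source): with 4 KiB system pages, slot_size == 24 and 170 slots already
  // allocated, return_slot is at offset 4080 and next_slot at 4104, so
  // commit_start == 4096 and commit_end == 8192: the slot straddles the page
  // boundary and one more system page gets committed.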

  // The slot being returned is considered allocated.
  slot_span->num_allocated_slots++;
  // Round down, because a slot that doesn't fully fit in the new page(s) isn't
  // provisioned.
  uint16_t slots_to_provision = (commit_end - return_slot) / size;
  slot_span->num_unprovisioned_slots -= slots_to_provision;
  PA_DCHECK(slot_span->num_allocated_slots +
                slot_span->num_unprovisioned_slots <=
            get_slots_per_span());

  // If lazy commit is enabled, meaning system pages in the slot span come
  // in an initially decommitted state, commit them here.
  // Note, we can't use PageKeepPermissionsIfPossible, because we have no
  // knowledge of which pages have been committed before (it doesn't matter on
// Windows anyway).
  if (root->use_lazy_commit) {
    // TODO(lizeb): Handle commit failure.
    root->RecommitSystemPagesForData(commit_start, commit_end - commit_start,
                                     PageUpdatePermissions);
  }

  // Add all slots that fit within so far committed pages to the free list.
  PartitionFreelistEntry* prev_entry = nullptr;
  char* next_slot_end = next_slot + size;
  size_t free_list_entries_added = 0;
  while (next_slot_end <= commit_end) {
    auto* entry = new (next_slot) PartitionFreelistEntry();
    if (!slot_span->freelist_head) {
      PA_DCHECK(!prev_entry);
      PA_DCHECK(!free_list_entries_added);
      slot_span->SetFreelistHead(entry);
    } else {
      PA_DCHECK(free_list_entries_added);
      prev_entry->SetNext(entry);
    }
    next_slot = next_slot_end;
    next_slot_end = next_slot + size;
    prev_entry = entry;
#if DCHECK_IS_ON()
    free_list_entries_added++;
#endif
  }

#if DCHECK_IS_ON()
  // The only provisioned slot not added to the free list is the one being
  // returned.
  PA_DCHECK(slots_to_provision == free_list_entries_added + 1);
  // We didn't necessarily provision more than one slot (e.g. if |slot_size|
  // is large), meaning that |slot_span->freelist_head| can be nullptr.
  if (slot_span->freelist_head) {
    PA_DCHECK(free_list_entries_added);
    slot_span->freelist_head->CheckFreeList(slot_size);
  }
#endif

  return return_slot;
}

template <bool thread_safe>
bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
  SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
  if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
    return false;

  SlotSpanMetadata<thread_safe>* next_slot_span;

  for (; slot_span; slot_span = next_slot_span) {
    next_slot_span = slot_span->next_slot_span;
    PA_DCHECK(slot_span->bucket == this);
    PA_DCHECK(slot_span != empty_slot_spans_head);
    PA_DCHECK(slot_span != decommitted_slot_spans_head);

    if (LIKELY(slot_span->is_active())) {
      // This slot span is usable because it has freelist entries, or has
      // unprovisioned slots we can create freelist entries from.
      active_slot_spans_head = slot_span;
      return true;
    }

    // Deal with empty and decommitted slot spans.
    if (LIKELY(slot_span->is_empty())) {
      slot_span->next_slot_span = empty_slot_spans_head;
      empty_slot_spans_head = slot_span;
    } else if (LIKELY(slot_span->is_decommitted())) {
      slot_span->next_slot_span = decommitted_slot_spans_head;
      decommitted_slot_spans_head = slot_span;
    } else {
      PA_DCHECK(slot_span->is_full());
      // If we get here, we found a full slot span. Skip over it too, and also
      // mark it as full (via a negative value). We need it marked so that
      // free'ing can tell, and move it back into the active list.
      slot_span->num_allocated_slots = -slot_span->num_allocated_slots;
      ++num_full_slot_spans;
      // num_full_slot_spans is a 24-bit bit-field for efficient packing, so
      // guard against overflow to be safe.
      if (UNLIKELY(!num_full_slot_spans))
        OnFull();
      // Not necessary but might help stop accidents.
      slot_span->next_slot_span = nullptr;
    }
  }

  active_slot_spans_head =
      SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  return false;
}

template <bool thread_safe>
void* PartitionBucket<thread_safe>::SlowPathAlloc(
    PartitionRoot<thread_safe>* root,
    int flags,
    size_t raw_size,
    size_t slot_span_alignment,
    bool* is_already_zeroed) {
  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
            bits::IsPowerOfTwo(slot_span_alignment));

  // The slow path is called when the freelist is empty. The only exception is
  // when a higher-order alignment is requested, in which case the freelist
  // logic is bypassed and we go directly for slot span allocation.
  bool allocate_aligned_slot_span = slot_span_alignment > PartitionPageSize();
  PA_DCHECK(!active_slot_spans_head->freelist_head ||
            allocate_aligned_slot_span);

  SlotSpanMetadata<thread_safe>* new_slot_span = nullptr;
  // |new_slot_span->bucket| will always be |this|, except when |this| is the
  // sentinel bucket, which is used to signal a direct mapped allocation. In
  // this case |new_bucket| will be set properly later. This avoids a read for
  // most allocations.
  PartitionBucket* new_bucket = this;
  *is_already_zeroed = false;

  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
  // marked as special cases. We bounce them through to the slow path so that
  // we can still have a blazing fast hot path due to lack of corner-case
  // branches.
  //
  // Note: The ordering of the conditionals matters! In particular,
  // SetNewActiveSlotSpan() has a side-effect even when returning
  // false where it sweeps the active list and may move things into the empty or
  // decommitted lists which affects the subsequent conditional.
  if (UNLIKELY(is_direct_mapped())) {
    PA_DCHECK(raw_size > kMaxBucketed);
    PA_DCHECK(this == &root->sentinel_bucket);
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());

    // No fast path for direct-mapped allocations.
    if (flags & PartitionAllocFastPathOrReturnNull)
      return nullptr;

    new_slot_span =
        PartitionDirectMap(root, flags, raw_size, slot_span_alignment);
    if (new_slot_span)
      new_bucket = new_slot_span->bucket;
    // Memory from PageAllocator is always zeroed.
    *is_already_zeroed = true;
  } else if (LIKELY(!allocate_aligned_slot_span && SetNewActiveSlotSpan())) {
    // First, did we find an active slot span in the active list?
    new_slot_span = active_slot_spans_head;
    PA_DCHECK(new_slot_span->is_active());
  } else if (LIKELY(!allocate_aligned_slot_span &&
                    (empty_slot_spans_head != nullptr ||
                     decommitted_slot_spans_head != nullptr))) {
    // Second, look in our lists of empty and decommitted slot spans.
    // Check empty slot spans first, which are preferred, but beware that an
    // empty slot span might have been decommitted.
    while (LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
      empty_slot_spans_head = new_slot_span->next_slot_span;
      // Accept the empty slot span unless it got decommitted.
      if (new_slot_span->freelist_head) {
        new_slot_span->next_slot_span = nullptr;
        new_slot_span->ToSuperPageExtent()
            ->IncrementNumberOfNonemptySlotSpans();
        break;
      }
      PA_DCHECK(new_slot_span->is_decommitted());
      new_slot_span->next_slot_span = decommitted_slot_spans_head;
      decommitted_slot_spans_head = new_slot_span;
    }
    if (UNLIKELY(!new_slot_span) &&
        LIKELY(decommitted_slot_spans_head != nullptr)) {
      // Commit can be expensive, don't do it.
      if (flags & PartitionAllocFastPathOrReturnNull)
        return nullptr;

      new_slot_span = decommitted_slot_spans_head;
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_decommitted());
      decommitted_slot_spans_head = new_slot_span->next_slot_span;

      // If lazy commit is enabled, pages will be recommitted when provisioning
      // slots, in ProvisionMoreSlotsAndAllocOne(), not here.
      if (!root->use_lazy_commit) {
        void* addr =
            SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(new_slot_span);
        // If lazy commit was never used, we have a guarantee that all slot span
        // pages have been previously committed, and then decommitted using
        // PageKeepPermissionsIfPossible, so use the same option as an
        // optimization. Otherwise fall back to PageUpdatePermissions (slower).
        // (Insider knowledge: as of writing this comment, lazy commit is only
        // used on Windows and this flag is ignored there, thus no perf impact.)
        // TODO(lizeb): Handle commit failure.
        root->RecommitSystemPagesForData(
            addr, new_slot_span->bucket->get_bytes_per_span(),
            root->never_used_lazy_commit ? PageKeepPermissionsIfPossible
                                         : PageUpdatePermissions);
      }

      new_slot_span->Reset();
      *is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
    }
    PA_DCHECK(new_slot_span);
  } else {
    // Getting a new slot span is expensive, don't do it.
    if (flags & PartitionAllocFastPathOrReturnNull)
      return nullptr;

    // Third. If we get here, we need a brand new slot span.
    // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
    // as slot_span_committed_size.
    new_slot_span = AllocNewSlotSpan(root, flags, slot_span_alignment);
    // New memory from PageAllocator is always zeroed.
    *is_already_zeroed = true;
  }

  // Bail if we had a memory allocation failure.
  if (UNLIKELY(!new_slot_span)) {
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
    if (flags & PartitionAllocReturnNull)
      return nullptr;
    // See comment in PartitionDirectMap() for unlocking.
    ScopedUnlockGuard<thread_safe> unlock{root->lock_};
    root->OutOfMemory(raw_size);
    IMMEDIATE_CRASH();  // Not required, kept as documentation.
  }

  PA_DCHECK(new_bucket != &root->sentinel_bucket);
  new_bucket->active_slot_spans_head = new_slot_span;
  if (new_slot_span->CanStoreRawSize())
    new_slot_span->SetRawSize(raw_size);

  // If we found an active slot span with free slots, or an empty slot span, we
  // have a usable freelist head.
  if (LIKELY(new_slot_span->freelist_head != nullptr)) {
    PartitionFreelistEntry* entry = new_slot_span->freelist_head;
    PartitionFreelistEntry* new_head = entry->GetNext(slot_size);
    new_slot_span->SetFreelistHead(new_head);
    new_slot_span->num_allocated_slots++;

    // We likely set *is_already_zeroed to true above, make sure that the
    // freelist entry doesn't contain data.
    return entry->ClearForAllocation();
  }

  // Otherwise, we need to provision more slots by committing more pages. Build
  // the free list for the newly provisioned slots.
  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
  return ProvisionMoreSlotsAndAllocOne(root, new_slot_span);
}

template struct PartitionBucket<ThreadSafe>;
template struct PartitionBucket<NotThreadSafe>;

}  // namespace internal
}  // namespace base
188
src/base/allocator/partition_allocator/partition_bucket.h
Normal file
188
src/base/allocator/partition_allocator/partition_bucket.h
Normal file
@ -0,0 +1,188 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

#include <stddef.h>
#include <stdint.h>

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/thread_annotations.h"

namespace base {
namespace internal {

template <bool thread_safe>
struct PartitionBucket {
  // Accessed most in hot path => goes first. Only nullptr for invalid buckets,
  // may be pointing to the sentinel.
  SlotSpanMetadata<thread_safe>* active_slot_spans_head;

  SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
  SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
  uint32_t slot_size;
  uint32_t num_system_pages_per_slot_span : 8;
  uint32_t num_full_slot_spans : 24;

  // `slot_size_reciprocal` is used to improve the performance of
  // `GetSlotNumber`. It is computed as `(1 / size) * (2 ** M)` where M is
  // chosen to provide the desired accuracy. As a result, we can replace a slow
  // integer division (or modulo) operation with a pair of multiplication and a
  // bit shift, i.e. `value / size` becomes `(value * size_reciprocal) >> M`.
  uint64_t slot_size_reciprocal;

  // This is `M` from the formula above. For accurate results, both `value` and
  // `size`, which are bound by `kMaxBucketed` for our purposes, must be less
  // than `2 ** (M / 2)`. On the other hand, the result of the expression
  // `3 * M / 2` must be less than 64, otherwise integer overflow can occur.
  static constexpr uint64_t kReciprocalShift = 42;
  static constexpr uint64_t kReciprocalMask = (1ull << kReciprocalShift) - 1;
  static_assert(
      kMaxBucketed < (1 << (kReciprocalShift / 2)),
      "GetSlotNumber may produce an incorrect result when kMaxBucketed is "
      "too large.");

  // Public API.
  void Init(uint32_t new_slot_size);

  // Sets |is_already_zeroed| to true if the allocation was satisfied by
  // requesting (a) new page(s) from the operating system, or false otherwise.
  // This enables an optimization for when callers use |PartitionAllocZeroFill|:
  // there is no need to call memset on fresh pages; the OS has already zeroed
  // them. (See |PartitionRoot::AllocFromBucket|.)
  //
  // Note the matching Free() functions are in SlotSpanMetadata.
  BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
                                           int flags,
                                           size_t raw_size,
                                           size_t slot_span_alignment,
                                           bool* is_already_zeroed)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  ALWAYS_INLINE bool CanStoreRawSize() const {
    // For direct-map as well as single-slot slot spans (recognized by checking
    // against |MaxRegularSlotSpanSize()|), we have some spare metadata space in
    // subsequent PartitionPage to store the raw size. It isn't only about
    // metadata space, though: slot spans that have more than one slot can't
    // have a raw size stored, because we wouldn't know which slot it applies
    // to.
    if (LIKELY(slot_size <= MaxRegularSlotSpanSize()))
      return false;

    PA_DCHECK((slot_size % SystemPageSize()) == 0);
    PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);

    return true;
  }

  // Some buckets are pseudo-buckets, which are disabled because they would
  // otherwise not fulfill alignment constraints.
  ALWAYS_INLINE bool is_valid() const {
    return active_slot_spans_head != nullptr;
  }
  ALWAYS_INLINE bool is_direct_mapped() const {
    return !num_system_pages_per_slot_span;
  }
  ALWAYS_INLINE size_t get_bytes_per_span() const {
    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
    // https://crbug.com/680657
    return num_system_pages_per_slot_span << SystemPageShift();
  }
  ALWAYS_INLINE uint16_t get_slots_per_span() const {
    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
    // https://crbug.com/680657
    return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
  }
  // Returns a natural number of partition pages (calculated by
  // get_system_pages_per_slot_span()) to allocate from the current
  // super page when the bucket runs out of slots.
  ALWAYS_INLINE uint16_t get_pages_per_slot_span() const {
    // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
    return (num_system_pages_per_slot_span +
            (NumSystemPagesPerPartitionPage() - 1)) /
           NumSystemPagesPerPartitionPage();
  }

  // This helper function scans a bucket's active slot span list for a suitable
  // new active slot span. When it finds a suitable new active slot span (one
  // that has free slots and is not empty), it is set as the new active slot
  // span. If there is no suitable new active slot span, the current active slot
  // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
  // slot spans are scanned, they are tidied up according to their state. Empty
  // slot spans are swept on to the empty list, decommitted slot spans on to the
  // decommitted list and full slot spans are unlinked from any list.
  //
  // This is where the guts of the bucket maintenance is done!
  bool SetNewActiveSlotSpan();

  // Returns a slot number starting from the beginning of the slot span.
  ALWAYS_INLINE size_t GetSlotNumber(size_t offset_in_slot_span) {
    // See the static assertion for `kReciprocalShift` above.
    PA_DCHECK(offset_in_slot_span <= kMaxBucketed);
PA_DCHECK(slot_size <= kMaxBucketed);
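    // Illustrative example (not from the original source): with
    // slot_size == 32, slot_size_reciprocal == 2^37, so for
    // offset_in_slot_span == 100 the expression below yields
    // (100 * 2^37) >> 42 == 100 / 32 == 3.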

    const size_t offset_in_slot =
        ((offset_in_slot_span * slot_size_reciprocal) >> kReciprocalShift);
    PA_DCHECK(offset_in_slot_span / slot_size == offset_in_slot);

    return offset_in_slot;
  }

 private:
  static NOINLINE void OnFull();

  // Returns the number of system pages in a slot span.
  //
  // The calculation attempts to find the best number of system pages to
  // allocate for the given slot_size to minimize wasted space. It uses a
  // heuristic that looks at number of bytes wasted after the last slot and
  // attempts to account for the PTE usage of each system page.
  uint8_t get_system_pages_per_slot_span();

  // Allocates a new slot span with size |num_partition_pages| from the
  // current extent. Metadata within this slot span will be initialized.
  // Returns nullptr on error.
  ALWAYS_INLINE SlotSpanMetadata<thread_safe>* AllocNewSlotSpan(
      PartitionRoot<thread_safe>* root,
      int flags,
      size_t slot_span_alignment) EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Allocates a new super page from the current extent. All slot-spans will be
  // in the decommitted state. Returns nullptr on error.
  ALWAYS_INLINE void* AllocNewSuperPage(PartitionRoot<thread_safe>* root,
                                        int flags)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);

  // Each bucket allocates a slot span when it runs out of slots.
  // A slot span's size is equal to get_pages_per_slot_span() number of
  // partition pages. This function initializes all PartitionPage within the
  // span to point to the first PartitionPage which holds all the metadata
  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
  // as the owner of the span. It does NOT put the slots into the bucket's
  // freelist.
  ALWAYS_INLINE void InitializeSlotSpan(
      SlotSpanMetadata<thread_safe>* slot_span);

  // Commit 1 or more pages in |slot_span|, enough to get the next slot, which
  // is returned by this function. If more slots fit into the committed pages,
  // they'll be added to the free list of the slot span (note that next pointers
  // are stored inside the slots).
  // The free list must be empty when calling this function.
  //
  // If |slot_span| was freshly allocated, it must have been passed through
  // InitializeSlotSpan() first.
  ALWAYS_INLINE char* ProvisionMoreSlotsAndAllocOne(
      PartitionRoot<thread_safe>* root,
      SlotSpanMetadata<thread_safe>* slot_span)
      EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
227
src/base/allocator/partition_allocator/partition_bucket_lookup.h
Normal file
227
src/base/allocator/partition_allocator/partition_bucket_lookup.h
Normal file
@ -0,0 +1,227 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_

#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/compiler_specific.h"

namespace base {
namespace internal {
namespace {

// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 0b101001.
// Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
// order_index is the next three MSBs == 0b010 == 2.
// sub_order_index_mask is a mask for the remaining bits == 0b11 (masking to
// 0b01 for the sub_order_index).
constexpr uint8_t OrderIndexShift(uint8_t order) {
  if (order < kNumBucketsPerOrderBits + 1)
    return 0;

  return order - (kNumBucketsPerOrderBits + 1);
}

constexpr size_t OrderSubIndexMask(uint8_t order) {
  if (order == kBitsPerSizeT)
    return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);

  return ((static_cast<size_t>(1) << order) - 1) >>
         (kNumBucketsPerOrderBits + 1);
}

#if defined(PA_HAS_64_BITS_POINTERS)
#define BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, "");
#else
#define BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, "");
#endif  // defined(PA_HAS_64_BITS_POINTERS)

constexpr uint8_t kOrderIndexShift[BITS_PER_SIZE_T + 1] = {
    OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2),
    OrderIndexShift(3), OrderIndexShift(4), OrderIndexShift(5),
    OrderIndexShift(6), OrderIndexShift(7), OrderIndexShift(8),
    OrderIndexShift(9), OrderIndexShift(10), OrderIndexShift(11),
    OrderIndexShift(12), OrderIndexShift(13), OrderIndexShift(14),
    OrderIndexShift(15), OrderIndexShift(16), OrderIndexShift(17),
    OrderIndexShift(18), OrderIndexShift(19), OrderIndexShift(20),
    OrderIndexShift(21), OrderIndexShift(22), OrderIndexShift(23),
    OrderIndexShift(24), OrderIndexShift(25), OrderIndexShift(26),
    OrderIndexShift(27), OrderIndexShift(28), OrderIndexShift(29),
    OrderIndexShift(30), OrderIndexShift(31), OrderIndexShift(32),
#if BITS_PER_SIZE_T == 64
    OrderIndexShift(33), OrderIndexShift(34), OrderIndexShift(35),
    OrderIndexShift(36), OrderIndexShift(37), OrderIndexShift(38),
    OrderIndexShift(39), OrderIndexShift(40), OrderIndexShift(41),
    OrderIndexShift(42), OrderIndexShift(43), OrderIndexShift(44),
    OrderIndexShift(45), OrderIndexShift(46), OrderIndexShift(47),
    OrderIndexShift(48), OrderIndexShift(49), OrderIndexShift(50),
    OrderIndexShift(51), OrderIndexShift(52), OrderIndexShift(53),
    OrderIndexShift(54), OrderIndexShift(55), OrderIndexShift(56),
    OrderIndexShift(57), OrderIndexShift(58), OrderIndexShift(59),
    OrderIndexShift(60), OrderIndexShift(61), OrderIndexShift(62),
    OrderIndexShift(63), OrderIndexShift(64)
#endif
};

constexpr size_t kOrderSubIndexMask[BITS_PER_SIZE_T + 1] = {
    OrderSubIndexMask(0), OrderSubIndexMask(1), OrderSubIndexMask(2),
    OrderSubIndexMask(3), OrderSubIndexMask(4), OrderSubIndexMask(5),
    OrderSubIndexMask(6), OrderSubIndexMask(7), OrderSubIndexMask(8),
    OrderSubIndexMask(9), OrderSubIndexMask(10), OrderSubIndexMask(11),
    OrderSubIndexMask(12), OrderSubIndexMask(13), OrderSubIndexMask(14),
    OrderSubIndexMask(15), OrderSubIndexMask(16), OrderSubIndexMask(17),
    OrderSubIndexMask(18), OrderSubIndexMask(19), OrderSubIndexMask(20),
    OrderSubIndexMask(21), OrderSubIndexMask(22), OrderSubIndexMask(23),
    OrderSubIndexMask(24), OrderSubIndexMask(25), OrderSubIndexMask(26),
    OrderSubIndexMask(27), OrderSubIndexMask(28), OrderSubIndexMask(29),
    OrderSubIndexMask(30), OrderSubIndexMask(31), OrderSubIndexMask(32),
#if BITS_PER_SIZE_T == 64
    OrderSubIndexMask(33), OrderSubIndexMask(34), OrderSubIndexMask(35),
    OrderSubIndexMask(36), OrderSubIndexMask(37), OrderSubIndexMask(38),
    OrderSubIndexMask(39), OrderSubIndexMask(40), OrderSubIndexMask(41),
    OrderSubIndexMask(42), OrderSubIndexMask(43), OrderSubIndexMask(44),
    OrderSubIndexMask(45), OrderSubIndexMask(46), OrderSubIndexMask(47),
    OrderSubIndexMask(48), OrderSubIndexMask(49), OrderSubIndexMask(50),
    OrderSubIndexMask(51), OrderSubIndexMask(52), OrderSubIndexMask(53),
    OrderSubIndexMask(54), OrderSubIndexMask(55), OrderSubIndexMask(56),
    OrderSubIndexMask(57), OrderSubIndexMask(58), OrderSubIndexMask(59),
    OrderSubIndexMask(60), OrderSubIndexMask(61), OrderSubIndexMask(62),
    OrderSubIndexMask(63), OrderSubIndexMask(64)
#endif
};

}  // namespace

// The class used to generate the bucket lookup table at compile-time.
class BucketIndexLookup final {
 public:
  ALWAYS_INLINE constexpr static size_t GetIndex(size_t size);

  constexpr BucketIndexLookup() {
    constexpr uint16_t sentinel_bucket_index = kNumBuckets;

    InitBucketSizes();

    uint16_t* bucket_index_ptr = &bucket_index_lookup_[0];
    uint16_t bucket_index = 0;

    // Very small allocations, smaller than the first bucketed order ->
    // everything goes to the first bucket.
    for (uint8_t order = 0; order < kMinBucketedOrder; ++order) {
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = 0;
      }
    }

    // Normal buckets.
    for (uint8_t order = kMinBucketedOrder; order <= kMaxBucketedOrder;
         ++order) {
      size_t size = static_cast<size_t>(1) << (order - 1);
      size_t current_increment = size >> kNumBucketsPerOrderBits;
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = bucket_index;

        // For small sizes, buckets are close together (current_increment is
        // small). For instance, for:
        // - kAlignment == 16 (which is the case on most 64 bit systems)
        // - kNumBucketsPerOrder == 4
        //
        // The 3 next buckets after 16 are {20, 24, 28}. None of these are a
        // multiple of kAlignment, so they use the next bucket, that is 32 here.
        if (size % kAlignment != 0) {
          PA_DCHECK(bucket_sizes_[bucket_index] > size);
          // Do not increment bucket_index, since in the example above
          // |size| may be 20, and bucket_sizes_[bucket_index] == 32.
        } else {
          PA_DCHECK(bucket_sizes_[bucket_index] == size);
          bucket_index++;
        }

        size += current_increment;
      }
    }

    // Direct-mapped, and overflow.
    for (uint8_t order = kMaxBucketedOrder + 1; order <= kBitsPerSizeT;
         ++order) {
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = sentinel_bucket_index;
      }
    }

    // Smaller because some buckets are not valid due to alignment constraints.
    PA_DCHECK(bucket_index < kNumBuckets);
    PA_DCHECK(bucket_index_ptr == bucket_index_lookup_ + ((kBitsPerSizeT + 1) *
                                                          kNumBucketsPerOrder));
    // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
    // which tries to overflow to a non-existent order.
    *bucket_index_ptr = sentinel_bucket_index;
  }
  constexpr const size_t* bucket_sizes() const { return &bucket_sizes_[0]; }

 private:
  constexpr void InitBucketSizes() {
    size_t current_size = kSmallestBucket;
size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
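    // Illustrative walk-through (assuming kSmallestBucket == 16,
    // kNumBucketsPerOrder == 4 and kAlignment == 16, as in the example in the
    // constructor above; not from the original source): candidate sizes run
    // 16, 20, 24, 28, then 32, 40, 48, 56, ..., and only the multiples of 16
    // (16, 32, 48, 64, ...) survive the filter below.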
    size_t* bucket_size = &bucket_sizes_[0];
    for (size_t i = 0; i < kNumBucketedOrders; ++i) {
      for (size_t j = 0; j < kNumBucketsPerOrder; ++j) {
        // All bucket sizes have to be multiples of kAlignment, skip otherwise.
        if (current_size % kAlignment == 0) {
          *bucket_size = current_size;
          ++bucket_size;
        }
        current_size += current_increment;
      }
      current_increment <<= 1;
    }

    // The remaining buckets are invalid.
    while (bucket_size < bucket_sizes_ + kNumBuckets) {
      *(bucket_size++) = kInvalidBucketSize;
    }
  }

  size_t bucket_sizes_[kNumBuckets]{};
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation
  // sizes. It is one flat array instead of a 2D array because in the 2D
  // world, we'd need to index array[blah][max+1] which risks undefined
  // behavior.
  uint16_t
      bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
};

// static
ALWAYS_INLINE constexpr size_t BucketIndexLookup::GetIndex(size_t size) {
  // This forces the bucket table to be constant-initialized and immediately
  // materialized in the binary.
  constexpr BucketIndexLookup lookup{};
  const uint8_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
  // The order index is simply the next few bits after the most significant
  // bit.
  const size_t order_index =
      (size >> kOrderIndexShift[order]) & (kNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
const size_t sub_order_index = size & kOrderSubIndexMask[order];
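  // Illustrative trace (assuming kNumBucketsPerOrderBits == 3 and a 64-bit
  // size_t; not from the original source): for size == 41 (0b101001),
  // order == 6 and kOrderIndexShift[6] == 2, so order_index ==
  // (41 >> 2) & 7 == 2. The remaining bits, 41 & kOrderSubIndexMask[6] ==
  // 41 & 3 == 1, are non-zero, so the lookup below bumps the result to the
  // next bucket.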
  const uint16_t index =
      lookup.bucket_index_lookup_[(order << kNumBucketsPerOrderBits) +
                                  order_index + !!sub_order_index];
  PA_DCHECK(index <= kNumBuckets);  // Last one is the sentinel bucket.
  return index;
}

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
50
src/base/allocator/partition_allocator/partition_cookie.h
Normal file
50
src/base/allocator/partition_allocator/partition_cookie.h
Normal file
@ -0,0 +1,50 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h"

namespace base {
namespace internal {

static constexpr size_t kCookieSize = 16;

// Cookie is enabled for debug builds.
#if DCHECK_IS_ON()

static constexpr unsigned char kCookieValue[kCookieSize] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};

constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;

ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
    PA_DCHECK(*cookie_ptr == kCookieValue[i]);
}

ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
    *cookie_ptr = kCookieValue[i];
}
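
// A minimal usage sketch (illustrative only; the exact call sites live in the
// allocator itself, and |slot| and |usable_size| are assumed names):
//
//   char* slot = ...;                         // start of the slot
//   PartitionCookieWriteValue(slot);          // cookie before the data
//   PartitionCookieWriteValue(slot + kCookieSize + usable_size);
//   ...
//   PartitionCookieCheckValue(slot);          // on free; PA_DCHECK crashes
//                                             // on any mismatched byte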

#else

constexpr size_t kPartitionCookieSizeAdjustment = 0;

ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {}

ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {}
#endif  // DCHECK_IS_ON()

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
56
src/base/allocator/partition_allocator/partition_direct_map_extent.h
Normal file
56
src/base/allocator/partition_allocator/partition_direct_map_extent.h
Normal file
@ -0,0 +1,56 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_

#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_page.h"

namespace base {
namespace internal {

template <bool thread_safe>
struct PartitionDirectMapExtent {
  PartitionDirectMapExtent<thread_safe>* next_extent;
  PartitionDirectMapExtent<thread_safe>* prev_extent;
  PartitionBucket<thread_safe>* bucket;
  // Size of the entire reservation, including guard pages, meta-data,
  // padding for alignment before allocation, and padding for granularity at
  // the end of the allocation.
  size_t reservation_size;
  // Padding between the first partition page (guard pages + meta-data) and
  // the allocation.
  size_t padding_for_alignment;

  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
      SlotSpanMetadata<thread_safe>* slot_span);
};

// Metadata page for direct-mapped allocations.
template <bool thread_safe>
struct PartitionDirectMapMetadata {
  PartitionPage<thread_safe> page;
  PartitionPage<thread_safe> subsequent_page;
  PartitionBucket<thread_safe> bucket;
  PartitionDirectMapExtent<thread_safe> direct_map_extent;
};

template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span->bucket->is_direct_mapped());
  // |*slot_span| is the first field of |PartitionDirectMapMetadata|, just cast.
  auto* metadata =
      reinterpret_cast<PartitionDirectMapMetadata<thread_safe>*>(slot_span);
  PA_DCHECK(&metadata->page.slot_span_metadata == slot_span);
  return &metadata->direct_map_extent;
}
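
// An illustrative view of the cast above (a sketch, not from the original
// file): |slot_span| is the first field of |page|, which is in turn the first
// field of |PartitionDirectMapMetadata|, so all three share one address:
//
//   PartitionDirectMapMetadata
//   +- page (PartitionPage)          <-- |slot_span| points here
//   |    +- slot_span_metadata
//   +- subsequent_page
//   +- bucket
//   +- direct_map_extent             <-- what FromSlotSpan() returns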

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
282
src/base/allocator/partition_allocator/partition_freelist_entry.h
Normal file
282
src/base/allocator/partition_allocator/partition_freelist_entry.h
Normal file
@ -0,0 +1,282 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

#include <stdint.h>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"

namespace base {
namespace internal {

namespace {

#if defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()
[[noreturn]] NOINLINE void FreelistCorruptionDetected(size_t extra) {
  // Make it visible in minidumps.
  //
  // To make the size stick out, surround it with two easily recognizable
  // patterns: 0xffffffff..
  // Locally, one can use "x/3g <%rsp address>" in GDB to see the value on
  // x86_64.
  size_t before = ~0;
  base::debug::Alias(&before);

  size_t tmp_extra = extra;
  base::debug::Alias(&tmp_extra);

  size_t after = ~0;
  base::debug::Alias(&after);

  IMMEDIATE_CRASH();
}
#endif  // defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()

}  // namespace

struct EncodedPartitionFreelistEntry;

#if defined(PA_HAS_FREELIST_HARDENING)
static_assert(kSmallestBucket >= 2 * sizeof(void*),
              "Need enough space for two pointers in freelist entries");
#endif

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
constexpr size_t kMinimalBucketSizeWithRefCount =
    (1 + sizeof(PartitionRefCount) + kSmallestBucket - 1) &
    ~(kSmallestBucket - 1);
#if defined(PA_HAS_FREELIST_HARDENING)
static_assert(
    kMinimalBucketSizeWithRefCount >=
        sizeof(PartitionRefCount) + 2 * sizeof(void*),
    "Need enough space for two pointers and one refcount in freelist entries");
#else
static_assert(
    kMinimalBucketSizeWithRefCount >= sizeof(PartitionRefCount) + sizeof(void*),
    "Need enough space for one pointer and one refcount in freelist entries");
#endif
#endif

// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
 public:
  PartitionFreelistEntry() { SetNext(nullptr); }
  ~PartitionFreelistEntry() = delete;

  // Creates a new entry, with |next| following it.
  static ALWAYS_INLINE PartitionFreelistEntry* InitForThreadCache(
      void* slot_start,
      PartitionFreelistEntry* next) {
    auto* entry = reinterpret_cast<PartitionFreelistEntry*>(slot_start);
    // ThreadCache freelists can point to entries across superpage boundaries,
    // so there is no check here, contrary to |SetNext()|.
    entry->SetNextInternal(next);
    return entry;
  }

  // Placement new only.
  void* operator new(size_t) = delete;
  void operator delete(void* ptr) = delete;
  void* operator new(size_t, void* buffer) { return buffer; }

  ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
      PartitionFreelistEntry* ptr) {
    return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
  }

  // Puts |extra| on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
      size_t extra) const;
  ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t extra) const;

  NOINLINE void CheckFreeList(size_t extra) const {
#if defined(PA_HAS_FREELIST_HARDENING)
    for (auto* entry = this; entry; entry = entry->GetNext(extra)) {
      // |GetNext()| checks freelist integrity.
    }
#endif
  }

  NOINLINE void CheckFreeListForThreadCache(size_t extra) const {
#if defined(PA_HAS_FREELIST_HARDENING)
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache(extra)) {
      // |GetNextForThreadCache()| checks freelist integrity.
    }
#endif
  }

  ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
    // SetNext() is either called on the freelist head, when provisioning new
    // slots, or when GetNext() has been called before, no need to pass the
    // size.
#if DCHECK_IS_ON()
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (UNLIKELY(ptr &&
                 (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
                     (reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // DCHECK_IS_ON()
    SetNextInternal(ptr);
  }

  // Zeroes out |this| before returning it.
  ALWAYS_INLINE void* ClearForAllocation() {
    next_ = nullptr;
#if defined(PA_HAS_FREELIST_HARDENING)
    inverted_next_ = 0;
#endif
    return reinterpret_cast<void*>(this);
  }

 private:
  friend struct EncodedPartitionFreelistEntry;
  ALWAYS_INLINE static void* Transform(void* ptr) {
    // We use bswap on little endian as a fast transformation for two reasons:
    // 1) If an object is freed and its vtable used where the attacker doesn't
    // get the chance to run allocations between the free and use, the vtable
    // dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and elects to try and
    // corrupt a freelist pointer, partial pointer overwrite attacks are
    // thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
    uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
    return reinterpret_cast<void*>(masked);
  }
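
  // A concrete illustration of Transform() (assumed example values, not from
  // the original source): on little-endian x86_64,
  //   ptr    = 0x00007fff'12345678
  //   masked = 0x78563412'ff7f0000   (bytes reversed by ByteSwapUintPtrT)
  // Dereferencing the masked value faults in a typical address space, and a
  // partial overwrite of the stored bytes lands in the *high* bits of the
  // decoded pointer. Note that Transform() is an involution: applying it
  // twice returns the original pointer, so it serves as both encode and
  // decode.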

  ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr) {
    next_ = Encode(ptr);
#if defined(PA_HAS_FREELIST_HARDENING)
    inverted_next_ = ~reinterpret_cast<uintptr_t>(next_);
#endif
  }

  ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
      size_t extra,
      bool for_thread_cache) const;
#if defined(PA_HAS_FREELIST_HARDENING)
  static ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
                                   const PartitionFreelistEntry* next,
                                   bool for_thread_cache) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks two constraints:
    // - here and next must belong to the same superpage, unless this is in the
    //   thread cache (they even always belong to the same slot span).
    // - next cannot point inside the metadata area.
    //
    // Also, the lightweight UaF detection (pointer shadow) is checked.

    uintptr_t here_address = reinterpret_cast<uintptr_t>(here);
    uintptr_t next_address = reinterpret_cast<uintptr_t>(next);

    bool shadow_ptr_ok =
        ~reinterpret_cast<uintptr_t>(here->next_) == here->inverted_next_;

    bool same_superpage = (here_address & kSuperPageBaseMask) ==
                          (next_address & kSuperPageBaseMask);
    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However we don't want to
    // fetch anything from the root in this function.
    bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if (for_thread_cache)
      return shadow_ptr_ok & not_in_metadata;
    else
      return shadow_ptr_ok & same_superpage & not_in_metadata;
  }
#endif  // defined(PA_HAS_FREELIST_HARDENING)

  EncodedPartitionFreelistEntry* next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a Use-after-Free, or overflow of the previous
  // allocation in the slot span.
#if defined(PA_HAS_FREELIST_HARDENING)
  uintptr_t inverted_next_;
#endif
};

struct EncodedPartitionFreelistEntry {
  char scrambled[sizeof(PartitionFreelistEntry*)];
#if defined(PA_HAS_FREELIST_HARDENING)
  char copy_of_scrambled[sizeof(PartitionFreelistEntry*)];
#endif

  EncodedPartitionFreelistEntry() = delete;
  ~EncodedPartitionFreelistEntry() = delete;

  ALWAYS_INLINE static PartitionFreelistEntry* Decode(
      EncodedPartitionFreelistEntry* ptr) {
    return reinterpret_cast<PartitionFreelistEntry*>(
        PartitionFreelistEntry::Transform(ptr));
  }
};

static_assert(sizeof(PartitionFreelistEntry) ==
                  sizeof(EncodedPartitionFreelistEntry),
              "Should not have padding");

ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNextInternal(
    size_t extra,
    bool for_thread_cache) const {
  auto* ret = EncodedPartitionFreelistEntry::Decode(next_);
  // GetNext() can be called on decommitted memory, in which case |next_| is
  // nullptr, and none of the checks apply. Don't prefetch nullptr either.
  if (!ret)
    return nullptr;

#if defined(PA_HAS_FREELIST_HARDENING)
  // We rely on constant propagation to remove the branches coming from
  // |for_thread_cache|, since the argument is always a compile-time constant.
  if (UNLIKELY(!IsSane(this, ret, for_thread_cache)))
    FreelistCorruptionDetected(extra);
#endif

  // In real-world profiles, the load of |next_| above is responsible for a
  // large fraction of the allocation cost. However, we cannot anticipate it
  // enough since it is accessed right after we know its address.
  //
  // In the case of repeated allocations, we can prefetch the access that will
  // be done at the *next* allocation, which will touch *ret, so prefetch it
  // now.
  PA_PREFETCH(ret);

  return ret;
}

ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextForThreadCache(size_t extra) const {
  return GetNextInternal(extra, true);
}

ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
    size_t extra) const {
  return GetNextInternal(extra, false);
}

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
144
src/base/allocator/partition_allocator/partition_lock.h
Normal file
144
src/base/allocator/partition_allocator/partition_lock.h
Normal file
@ -0,0 +1,144 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_

#include <atomic>
#include <type_traits>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/spinning_mutex.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

namespace base {
namespace internal {

template <bool thread_safe>
class LOCKABLE MaybeSpinLock {
 public:
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
  void Unlock() UNLOCK_FUNCTION() {}
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
};

template <bool thread_safe>
class SCOPED_LOCKABLE ScopedGuard {
 public:
  explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
      EXCLUSIVE_LOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Lock();
  }
  ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }

 private:
  MaybeSpinLock<thread_safe>& lock_;
};
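
// A usage sketch for the guard (illustrative only; |lock_| is an assumed
// member of some enclosing class): it is plain RAII, pairing Lock() and
// Unlock() with scope entry and exit.
//
//   {
//     ScopedGuard<true> guard{lock_};
//     // ... critical section, lock_ is held ...
//   }  // lock_ released here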

template <bool thread_safe>
class SCOPED_LOCKABLE ScopedUnlockGuard {
 public:
  explicit ScopedUnlockGuard(MaybeSpinLock<thread_safe>& lock)
      UNLOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Unlock();
  }
  ~ScopedUnlockGuard() EXCLUSIVE_LOCK_FUNCTION() { lock_.Lock(); }

 private:
  MaybeSpinLock<thread_safe>& lock_;
};

template <>
class LOCKABLE MaybeSpinLock<true> {
 public:
  constexpr MaybeSpinLock() : lock_() {}
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {
#if DCHECK_IS_ON()
    // When PartitionAlloc is malloc(), it can easily become reentrant. For
    // instance, a DCHECK() triggers in external code (such as
    // base::Lock). DCHECK() error message formatting allocates, which triggers
    // PartitionAlloc, and then we get reentrancy, and in this case infinite
    // recursion.
    //
    // To avoid that, crash quickly when the code becomes reentrant.
    PlatformThreadRef current_thread = PlatformThread::CurrentRef();
    if (!lock_.Try()) {
      // The lock wasn't free when we tried to acquire it. This can be because
      // another thread or *this* thread was holding it.
      //
      // If it's this thread holding it, then it cannot have become free in the
      // meantime, and the current value of |owning_thread_ref_| is valid, as
      // it was set by this thread. Assuming that writes to
      // |owning_thread_ref_| are atomic, then if it's us, we are trying to
      // recursively acquire a non-recursive lock.
      //
      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
      // itself allocate. Meaning that without this code, a reentrancy issue
      // hangs on Linux.
      if (UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
                   current_thread)) {
        // Trying to acquire the lock while it's held by this thread:
        // reentrancy issue.
        IMMEDIATE_CRASH();
      }
      lock_.Acquire();
    }
    owning_thread_ref_.store(current_thread, std::memory_order_release);
#else
    lock_.Acquire();
#endif
  }

  void Unlock() UNLOCK_FUNCTION() {
#if DCHECK_IS_ON()
    owning_thread_ref_.store(PlatformThreadRef(), std::memory_order_release);
#endif
    lock_.Release();
  }
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
    lock_.AssertAcquired();
#if DCHECK_IS_ON()
    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
              PlatformThread::CurrentRef());
#endif
  }

 private:
  SpinningMutex lock_;

#if DCHECK_IS_ON()
  // Should in theory be protected by |lock_|, but we need to read it to detect
  // recursive lock acquisition (and thus, the allocator becoming reentrant).
  std::atomic<PlatformThreadRef> owning_thread_ref_{};
#endif
};
// We want PartitionRoot to not have a global destructor, so this should not
// have one.
static_assert(std::is_trivially_destructible<MaybeSpinLock<true>>::value, "");

template <>
class LOCKABLE MaybeSpinLock<false> {
 public:
  void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
  void Unlock() UNLOCK_FUNCTION() {}
  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}

  char padding_[sizeof(MaybeSpinLock<true>)];
};

static_assert(
    sizeof(MaybeSpinLock<true>) == sizeof(MaybeSpinLock<false>),
    "Sizes should be equal to ensure identical layout of PartitionRoot");

using PartitionLock = MaybeSpinLock<true>;
using PartitionAutoLock = ScopedGuard<true>;

}  // namespace internal
}  // namespace base
#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
38
src/base/allocator/partition_allocator/partition_oom.cc
Normal file
38
src/base/allocator/partition_allocator/partition_oom.cc
Normal file
@ -0,0 +1,38 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_oom.h"

#include "base/allocator/partition_allocator/oom.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h"

namespace base {
namespace internal {

OomFunction g_oom_handling_function = nullptr;

NOINLINE void NOT_TAIL_CALLED PartitionExcessiveAllocationSize(size_t size) {
  NO_CODE_FOLDING();
  OOM_CRASH(size);
}

#if !defined(ARCH_CPU_64_BITS)
NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
  NO_CODE_FOLDING();
  OOM_CRASH(size);
}

[[noreturn]] NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
  NO_CODE_FOLDING();
  OOM_CRASH(virtual_size);
}

#endif  // !defined(ARCH_CPU_64_BITS)

}  // namespace internal
}  // namespace base
40
src/base/allocator/partition_allocator/partition_oom.h
Normal file
40
src/base/allocator/partition_allocator/partition_oom.h
Normal file
@ -0,0 +1,40 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Holds functions for generating OOM errors from PartitionAlloc. This is
// distinct from oom.h in that it is meant only for use in PartitionAlloc.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_

#include <stddef.h>

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

namespace base {

typedef void (*OomFunction)(size_t);

namespace internal {

// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
extern OomFunction g_oom_handling_function;

[[noreturn]] BASE_EXPORT NOINLINE void PartitionExcessiveAllocationSize(
    size_t size);

#if !defined(ARCH_CPU_64_BITS)
[[noreturn]] NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(
    size_t size);
[[noreturn]] NOINLINE void PartitionOutOfMemoryWithLargeVirtualSize(
    size_t virtual_size);
#endif

}  // namespace internal

}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
291
src/base/allocator/partition_allocator/partition_page.cc
Normal file
291
src/base/allocator/partition_allocator/partition_page.cc
Normal file
@ -0,0 +1,291 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_page.h"

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/bits.h"
#include "base/dcheck_is_on.h"
#include "base/feature_list.h"

namespace base {
namespace internal {

namespace {

void UnmapNow(void* reservation_start,
              size_t reservation_size,
              pool_handle giga_cage_pool);

template <bool thread_safe>
ALWAYS_INLINE void PartitionDirectUnmap(
    SlotSpanMetadata<thread_safe>* slot_span) {
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  root->lock_.AssertAcquired();
  auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);

  // Maintain the doubly-linked list of all direct mappings.
  if (extent->prev_extent) {
    PA_DCHECK(extent->prev_extent->next_extent == extent);
    extent->prev_extent->next_extent = extent->next_extent;
  } else {
    root->direct_map_list = extent->next_extent;
  }
  if (extent->next_extent) {
    PA_DCHECK(extent->next_extent->prev_extent == extent);
    extent->next_extent->prev_extent = extent->prev_extent;
  }

  // The actual decommit is deferred below, after releasing the lock.
  root->DecreaseCommittedPages(slot_span->bucket->slot_size);

  size_t reservation_size = extent->reservation_size;
  PA_DCHECK(!(reservation_size & DirectMapAllocationGranularityOffsetMask()));
  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reservation_size);
  root->total_size_of_direct_mapped_pages -= reservation_size;

  char* reservation_start = reinterpret_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  // The mapping may start at an unspecified location within a super page, but
  // we always reserve memory aligned to super page size.
  reservation_start = bits::AlignDown(reservation_start, kSuperPageSize);

  // All the metadata have been updated above, in particular the mapping has
  // been unlinked. We can safely release the memory outside the lock, which is
  // important as decommitting memory can be expensive.
  //
  // This can create a fake "address space exhaustion" OOM, in the case where
  // e.g. a large allocation is freed on a thread, and another large one is
  // made from another *before* UnmapNow() has finished running. In this case
  // the second one may not find enough space in the GigaCage, and fail. This
  // is expected to be very rare though, and likely preferable to holding the
  // lock while releasing the address space.
  ScopedUnlockGuard<thread_safe> unlock{root->lock_};
  UnmapNow(reservation_start, reservation_size, root->ChooseGigaCagePool());
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionRegisterEmptySlotSpan(
    SlotSpanMetadata<thread_safe>* slot_span) {
  PA_DCHECK(slot_span->is_empty());
  PartitionRoot<thread_safe>* root =
      PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  root->lock_.AssertAcquired();

  slot_span->ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();

  // If the slot span is already registered as empty, give it another life.
  if (slot_span->empty_cache_index != -1) {
    PA_DCHECK(slot_span->empty_cache_index >= 0);
    PA_DCHECK(static_cast<unsigned>(slot_span->empty_cache_index) <
              kMaxFreeableSpans);
    PA_DCHECK(root->global_empty_slot_span_ring[slot_span->empty_cache_index] ==
              slot_span);
    root->global_empty_slot_span_ring[slot_span->empty_cache_index] = nullptr;
  }

  int16_t current_index = root->global_empty_slot_span_ring_index;
  SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
      root->global_empty_slot_span_ring[current_index];
  // The slot span might well have been re-activated, filled up, etc. before we
  // get around to looking at it here.
  if (slot_span_to_decommit)
    slot_span_to_decommit->DecommitIfPossible(root);

  // We put the empty slot span on our global list of "slot spans that were
  // once empty", thus providing it a bit of breathing room to get re-used
  // before we really free it. This improves performance, particularly on Mac
  // OS X which has subpar memory management performance.
  root->global_empty_slot_span_ring[current_index] = slot_span;
  slot_span->empty_cache_index = current_index;
  ++current_index;
  if (current_index == kMaxFreeableSpans)
    current_index = 0;
  root->global_empty_slot_span_ring_index = current_index;
}
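
// An illustrative trace of the ring above (a sketch, not from the original
// source): with kMaxFreeableSpans == 16, the 17th registration wraps around
// to ring slot 0, so the span stored there is only decommitted (if still
// empty) after 16 further registrations -- the "breathing room" mentioned
// above.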

}  // namespace

// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>
    SlotSpanMetadata<thread_safe>::sentinel_slot_span_;

// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
  return &sentinel_slot_span_;
}

template <bool thread_safe>
SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
    PartitionBucket<thread_safe>* bucket)
    : bucket(bucket), can_store_raw_size(bucket->CanStoreRawSize()) {}

template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::FreeSlowPath() {
#if DCHECK_IS_ON()
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif
  PA_DCHECK(this != get_sentinel_slot_span());
  if (LIKELY(num_allocated_slots == 0)) {
    // Slot span became fully unused.
    if (UNLIKELY(bucket->is_direct_mapped())) {
      PartitionDirectUnmap(this);
      return;
    }
#if DCHECK_IS_ON()
    freelist_head->CheckFreeList(bucket->slot_size);
#endif
    // If it's the current active slot span, change it. We bounce the slot span
    // to the empty list as a force towards defragmentation.
    if (LIKELY(this == bucket->active_slot_spans_head))
      bucket->SetNewActiveSlotSpan();
    PA_DCHECK(bucket->active_slot_spans_head != this);

    if (CanStoreRawSize())
      SetRawSize(0);

    PartitionRegisterEmptySlotSpan(this);
  } else {
    PA_DCHECK(!bucket->is_direct_mapped());
    // Ensure that the slot span is full. That's the only valid case if we
    // arrive here.
    PA_DCHECK(num_allocated_slots < 0);
    // A transition of num_allocated_slots from 0 to -1 is not legal, and
    // likely indicates a double-free.
    PA_CHECK(num_allocated_slots != -1);
    num_allocated_slots = -num_allocated_slots - 2;
    PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
    // Fully used slot span became partially used. It must be put back on the
    // non-full list. Also make it the current slot span to increase the
    // chances of it being filled up again. The old current slot span will be
    // the next slot span.
    PA_DCHECK(!next_slot_span);
    if (LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
      next_slot_span = bucket->active_slot_spans_head;
    bucket->active_slot_spans_head = this;
    --bucket->num_full_slot_spans;
    // Special case: for a partition slot span with just a single slot, it may
    // now be empty and we want to run it through the empty logic.
    if (UNLIKELY(num_allocated_slots == 0))
      FreeSlowPath();
  }
}

template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(is_empty());
  PA_DCHECK(!bucket->is_direct_mapped());
  void* slot_span_start = SlotSpanMetadata::ToSlotSpanStartPtr(this);
  // If lazy commit is enabled, only provisioned slots are committed.
  size_t size_to_decommit =
      root->use_lazy_commit
          ? bits::AlignUp(GetProvisionedSize(), SystemPageSize())
          : bucket->get_bytes_per_span();

  // A slot span that isn't decommitted must have had at least 1 allocation.
  PA_DCHECK(size_to_decommit > 0);
  root->DecommitSystemPagesForData(slot_span_start, size_to_decommit,
                                   PageKeepPermissionsIfPossible);

  // We actually leave the decommitted slot span in the active list. We'll
  // sweep it on to the decommitted list when we next walk the active list.
  // Pulling this trick enables us to use a singly-linked list for all
  // cases, which is critical in keeping the slot span metadata structure down
  // to 32 bytes in size.
  SetFreelistHead(nullptr);
  num_unprovisioned_slots = 0;
  PA_DCHECK(is_decommitted());
  PA_DCHECK(bucket);
}

template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
    PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(empty_cache_index >= 0);
  PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index]);
  empty_cache_index = -1;
  if (is_empty())
    Decommit(root);
}

namespace {
void UnmapNow(void* reservation_start,
              size_t reservation_size,
              pool_handle giga_cage_pool) {
  PA_DCHECK(reservation_start && reservation_size > 0);
#if DCHECK_IS_ON()
  // When USE_BACKUP_REF_PTR is off, the BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (giga_cage_pool == GetBRPPool()) {
    // In 32-bit mode, the beginning of a reservation may be excluded from the
    // BRP pool, so shift the pointer. The non-BRP pool doesn't have this
    // logic.
    PA_DCHECK(IsManagedByPartitionAllocBRPPool(
#if defined(PA_HAS_64_BITS_POINTERS)
        reservation_start
#else
        reinterpret_cast<char*>(reservation_start) +
        AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
#endif
        ));
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(giga_cage_pool == GetNonBRPPool());
    // The non-BRP pool doesn't need the adjustment that the BRP pool needs in
    // 32-bit mode.
    PA_DCHECK(IsManagedByPartitionAllocNonBRPPool(reservation_start));
  }
#endif  // DCHECK_IS_ON()

  uintptr_t ptr_as_uintptr = reinterpret_cast<uintptr_t>(reservation_start);
  PA_DCHECK((ptr_as_uintptr & kSuperPageOffsetMask) == 0);
  uintptr_t ptr_end = ptr_as_uintptr + reservation_size;
  auto* offset_ptr = ReservationOffsetPointer(ptr_as_uintptr);
  // Reset the offset table entries for the given memory before unreserving
  // it. Since the memory is not yet unreserved, and thus not available to
  // other threads, the table entries for the memory are not modified by other
  // threads either. So we can update the table entries without a race
  // condition.
  uint16_t i = 0;
  while (ptr_as_uintptr < ptr_end) {
    PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(ptr_as_uintptr));
    PA_DCHECK(*offset_ptr == i++);
    *offset_ptr++ = kOffsetTagNotAllocated;
    ptr_as_uintptr += kSuperPageSize;
  }

#if !defined(PA_HAS_64_BITS_POINTERS)
  AddressPoolManager::GetInstance()->MarkUnused(
      giga_cage_pool, reservation_start, reservation_size);
#endif

  // After resetting the table entries, unreserve and decommit the memory.
  AddressPoolManager::GetInstance()->UnreserveAndDecommit(
      giga_cage_pool, reservation_start, reservation_size);
}
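
// An illustrative example for the reset loop above (a sketch, not from the
// original source): a direct-mapped reservation spanning 3 super pages has
// offset table entries {0, 1, 2}; UnmapNow() verifies that sequence and
// rewrites each entry to kOffsetTagNotAllocated before the address space is
// handed back.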
}  // namespace

template struct SlotSpanMetadata<ThreadSafe>;
template struct SlotSpanMetadata<NotThreadSafe>;

}  // namespace internal
}  // namespace base
762
src/base/allocator/partition_allocator/partition_page.h
Normal file
762
src/base/allocator/partition_allocator/partition_page.h
Normal file
@ -0,0 +1,762 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

#include <string.h>
#include <cstdint>
#include <limits>
#include <utility>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/thread_annotations.h"

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
#include "base/allocator/partition_allocator/partition_ref_count.h"
#endif

namespace base {
namespace internal {

// An "extent" is a span of consecutive superpages. We link the partition's
// next extent (if there is one) to the very start of a superpage's metadata
// area.
template <bool thread_safe>
struct PartitionSuperPageExtentEntry {
  PartitionRoot<thread_safe>* root;
  PartitionSuperPageExtentEntry<thread_safe>* next;
  uint16_t number_of_consecutive_super_pages;
  uint16_t number_of_nonempty_slot_spans;

  ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans();
  ALWAYS_INLINE void DecrementNumberOfNonemptySlotSpans();
};
static_assert(
    sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
    "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
static_assert(
    kMaxSuperPages / kSuperPageSize <=
        std::numeric_limits<
            decltype(PartitionSuperPageExtentEntry<
                     ThreadSafe>::number_of_consecutive_super_pages)>::max(),
    "number_of_consecutive_super_pages must be big enough");

// Returns the base of the first super page in the range of consecutive super
// pages. CAUTION! |extent| must point to the extent of the first super page in
// the range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE char* SuperPagesBeginFromExtent(
    PartitionSuperPageExtentEntry<thread_safe>* extent) {
  PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
  PA_DCHECK(IsManagedByNormalBuckets(extent));
  return base::bits::AlignDown(reinterpret_cast<char*>(extent),
                               kSuperPageAlignment);
}

// Returns the end of the last super page in the range of consecutive super
// pages. CAUTION! |extent| must point to the extent of the first super page in
// the range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE char* SuperPagesEndFromExtent(
    PartitionSuperPageExtentEntry<thread_safe>* extent) {
  return SuperPagesBeginFromExtent(extent) +
         (extent->number_of_consecutive_super_pages * kSuperPageSize);
}

using AllocationStateMap =
    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;

// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active slot span has available free slots, as well as allocated ones.
// A full slot span has no free slots. An empty slot span has no allocated
// slots, and a decommitted slot span is an empty one that had its backing
// memory released back to the system.
//
// There are three linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list holds mostly empty slot
// spans, but may briefly hold decommitted ones too. The "decommitted" list
// holds only decommitted slot spans.
//
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
//   return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
//   the empty list or it _may_ leave it on the active list until a future
//   list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
//   If it does this, full, empty and decommitted slot spans encountered will
//   be booted out of the active list. If there are no suitable active slot
//   spans found, an empty or decommitted slot span (if one exists) will be
//   pulled from the empty/decommitted list on to the active list.
template <bool thread_safe>
struct __attribute__((packed)) SlotSpanMetadata {
  PartitionFreelistEntry* freelist_head = nullptr;
  SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
  PartitionBucket<thread_safe>* const bucket;

  // Deliberately signed, 0 for empty or decommitted slot spans, -n for full
  // slot spans:
  int16_t num_allocated_slots = 0;
  uint16_t num_unprovisioned_slots = 0;
  int8_t empty_cache_index = 0;  // -1 if not in the empty cache.
                                 // < kMaxFreeableSpans.
  static_assert(kMaxFreeableSpans < std::numeric_limits<int8_t>::max(), "");
  const bool can_store_raw_size;

  explicit SlotSpanMetadata(PartitionBucket<thread_safe>* bucket);

  // Public API
  // Note the matching Alloc() functions are in PartitionPage.
  BASE_EXPORT NOINLINE void FreeSlowPath();
  ALWAYS_INLINE void Free(void* ptr);

  void Decommit(PartitionRoot<thread_safe>* root);
  void DecommitIfPossible(PartitionRoot<thread_safe>* root);

  // Pointer manipulation functions. These must be static as the input
  // |slot_span| pointer may be the result of an offset calculation and
  // therefore cannot be trusted. The objective of these functions is to
  // sanitize this input.
  ALWAYS_INLINE static void* ToSlotSpanStartPtr(
      const SlotSpanMetadata* slot_span);
  ALWAYS_INLINE static SlotSpanMetadata* FromSlotStartPtr(void* slot_start);
  ALWAYS_INLINE static SlotSpanMetadata* FromSlotInnerPtr(void* ptr);

  ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>* ToSuperPageExtent()
      const;

  // Checks if it is feasible to store raw_size.
  ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size; }
  // The caller is responsible for ensuring that raw_size can be stored before
  // calling Set/GetRawSize.
  ALWAYS_INLINE void SetRawSize(size_t raw_size);
  ALWAYS_INLINE size_t GetRawSize() const;

  ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);

  // Returns the size of the region used within a slot. The used region
  // comprises the actual allocated data, extras, and possibly empty space in
  // the middle.
  ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
    // The returned size can be:
    // - The slot size for small buckets.
    // - Exact size needed to satisfy allocation (incl. extras), for large
    //   buckets and direct-mapped allocations (see also the comment in
    //   CanStoreRawSize() for more info).
    if (LIKELY(!CanStoreRawSize())) {
      return bucket->slot_size;
    }
    return GetRawSize();
  }

  // This includes padding due to rounding done at allocation; we don't know
  // the requested size at deallocation, so we use this in both places.
  ALWAYS_INLINE size_t GetSizeForBookkeeping() const {
    // This could be more precise for allocations where CanStoreRawSize()
    // returns true (large allocations). However this is called for *every*
    // allocation, so we don't want an extra branch there.
    return bucket->slot_size;
  }

  // Returns the size available to the app. It can be equal or higher than the
  // requested size. If higher, the overage won't exceed what's actually usable
  // by the app without a risk of running out of an allocated region or into
  // PartitionAlloc's internal data (like extras).
  ALWAYS_INLINE size_t GetUsableSize(PartitionRoot<thread_safe>* root) const {
    // The returned size can be:
    // - The slot size minus extras, for small buckets. This could be more than
    //   requested size.
    // - Raw size minus extras, for large buckets and direct-mapped allocations
    //   (see also the comment in CanStoreRawSize() for more info). This is
    //   equal to requested size.
    size_t size_to_adjust;
    if (LIKELY(!CanStoreRawSize())) {
      size_to_adjust = bucket->slot_size;
    } else {
      size_to_adjust = GetRawSize();
    }
    return root->AdjustSizeForExtrasSubtract(size_to_adjust);
  }

  // Returns the total size of the slots that are currently provisioned.
  ALWAYS_INLINE size_t GetProvisionedSize() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
    return provisioned_size;
  }

  ALWAYS_INLINE void Reset();

  // TODO(ajwong): Can this be made private? https://crbug.com/787153
  BASE_EXPORT static SlotSpanMetadata* get_sentinel_slot_span();

  // Page State accessors.
  // Note that it's only valid to call these functions on pages found on one of
  // the page lists. Specifically, you can't call these functions on full pages
  // that were detached from the active list.
  //
  // This restriction provides the flexibility for some of the status fields to
  // be repurposed when a page is taken off a list. See the negation of
  // |num_allocated_slots| when a full page is removed from the active list
  // for an example of such repurposing.
  ALWAYS_INLINE bool is_active() const;
  ALWAYS_INLINE bool is_full() const;
  ALWAYS_INLINE bool is_empty() const;
  ALWAYS_INLINE bool is_decommitted() const;

 private:
  // sentinel_slot_span_ is used as a sentinel to indicate that there is no
  // slot span in the active list. We could use nullptr, but in that case we
  // need to add a null-check branch to the hot allocation path. We want to
  // avoid that.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
  static SlotSpanMetadata sentinel_slot_span_;
  // For the sentinel.
  constexpr SlotSpanMetadata() noexcept
      : bucket(nullptr), can_store_raw_size(false) {}
};
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
              "SlotSpanMetadata must fit into a Page Metadata slot.");
|
||||
|
||||
// Metadata of a non-first partition page in a slot span.
|
||||
struct SubsequentPageMetadata {
|
||||
// Raw size is the size needed to satisfy the allocation (requested size +
|
||||
// extras). If available, it can be used to report better statistics or to
|
||||
// bring protective cookie closer to the allocated memory.
|
||||
//
|
||||
// It can be used only if:
|
||||
// - there is no more than one slot in the slot span (otherwise we wouldn't
|
||||
// know which slot the raw size applies to)
|
||||
// - there is more than one partition page in the slot span (the metadata of
|
||||
// the first one is used to store slot information, but the second one is
|
||||
// available for extra information)
|
||||
size_t raw_size;
|
||||
};
|
||||
|
||||
// Each partition page has metadata associated with it. The metadata of the
|
||||
// first page of a slot span, describes that slot span. If a slot span spans
|
||||
// more than 1 page, the page metadata may contain rudimentary additional
|
||||
// information.
|
||||
template <bool thread_safe>
|
||||
struct PartitionPage {
|
||||
// "Pack" the union so that common page metadata still fits within
|
||||
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
|
||||
union __attribute__((packed)) {
|
||||
SlotSpanMetadata<thread_safe> slot_span_metadata;
|
||||
|
||||
SubsequentPageMetadata subsequent_page_metadata;
|
||||
|
||||
// sizeof(PartitionPageMetadata) must always be:
|
||||
// - a power of 2 (for fast modulo operations)
|
||||
// - below kPageMetadataSize
|
||||
//
|
||||
// This makes sure that this is respected no matter the architecture.
|
||||
char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
|
||||
};
|
||||
|
||||
// The first PartitionPage of the slot span holds its metadata. This offset
|
||||
// tells how many pages in from that first page we are.
|
||||
// For direct maps, the first page metadata (that isn't super page extent
|
||||
// entry) uses this field to tell how many pages to the right the direct map
|
||||
// metadata starts.
|
||||
//
|
||||
// 6 bits is enough to represent all possible offsets, given that the smallest
|
||||
// partition page is 16kiB and the offset won't exceed 1MiB.
|
||||
static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
|
||||
static constexpr uint16_t kMaxSlotSpanMetadataOffset =
|
||||
(1 << kMaxSlotSpanMetadataBits) - 1;
|
||||
uint8_t slot_span_metadata_offset;
|
||||
|
||||
// |is_valid| tells whether the page is part of a slot span. If |false|,
|
||||
// |has_valid_span_after_this| tells whether it's an unused region in between
|
||||
// slot spans within the super page.
|
||||
// Note, |is_valid| has been added for clarity, but if we ever need to save
|
||||
// this bit, it can be inferred from:
|
||||
// |!slot_span_metadata_offset && slot_span_metadata->bucket|.
|
||||
bool is_valid : 1;
|
||||
bool has_valid_span_after_this : 1;
|
||||
|
||||
ALWAYS_INLINE static PartitionPage* FromPtr(void* slot_start);
|
||||
|
||||
private:
|
||||
ALWAYS_INLINE static void* ToSlotSpanStartPtr(const PartitionPage* page);
|
||||
};
|
||||
|
||||
static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
|
||||
"PartitionPage must be able to fit in a metadata slot");
|
||||
static_assert(sizeof(PartitionPage<NotThreadSafe>) == kPageMetadataSize,
|
||||
"PartitionPage must be able to fit in a metadata slot");
|
||||
|
||||
// Certain functions rely on PartitionPage being either SlotSpanMetadata or
|
||||
// SubsequentPageMetadata, and therefore freely casting between each other.
|
||||
static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
|
||||
static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
|
||||
0,
|
||||
"");
|
||||
static_assert(offsetof(PartitionPage<NotThreadSafe>, slot_span_metadata) == 0,
|
||||
"");
|
||||
static_assert(offsetof(PartitionPage<NotThreadSafe>,
|
||||
subsequent_page_metadata) == 0,
|
||||
"");
|
||||
|
||||
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
|
||||
PA_DCHECK(IsReservationStart(ptr));
|
||||
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
|
||||
PA_DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
|
||||
// The metadata area is exactly one system page (the guard page) into the
|
||||
// super page.
|
||||
return reinterpret_cast<char*>(pointer_as_uint + SystemPageSize());
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
|
||||
PartitionSuperPageToExtent(char* ptr) {
|
||||
return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
|
||||
PartitionSuperPageToMetadataArea(ptr));
|
||||
}
|
||||
|
||||
// Size that should be reserved for state bitmap (if present) inside a super
|
||||
// page. Elements of a super page are partition-page-aligned, hence the returned
|
||||
// size is a multiple of partition page size.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
|
||||
ReservedStateBitmapSize() {
|
||||
return bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
|
||||
}
|
||||
|
||||
// Size that should be committed for state bitmap (if present) inside a super
|
||||
// page. It is a multiple of system page size.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
|
||||
CommittedStateBitmapSize() {
|
||||
return bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
|
||||
}
|
||||
|
||||
// Returns the pointer to the state bitmap in the super page. It's the caller's
|
||||
// responsibility to ensure that the bitmaps even exist.
|
||||
ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(char* super_page_base) {
|
||||
PA_DCHECK(
|
||||
!(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
|
||||
return reinterpret_cast<AllocationStateMap*>(super_page_base +
|
||||
PartitionPageSize());
|
||||
}
|
||||
|
||||
ALWAYS_INLINE char* SuperPagePayloadBegin(char* super_page_base,
|
||||
bool with_quarantine) {
|
||||
PA_DCHECK(
|
||||
!(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
|
||||
return super_page_base + PartitionPageSize() +
|
||||
(with_quarantine ? ReservedStateBitmapSize() : 0);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE char* SuperPagePayloadEnd(char* super_page_base) {
|
||||
PA_DCHECK(
|
||||
!(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
|
||||
return super_page_base + kSuperPageSize - PartitionPageSize();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE size_t SuperPagePayloadSize(char* super_page_base,
|
||||
bool with_quarantine) {
|
||||
return SuperPagePayloadEnd(super_page_base) -
|
||||
SuperPagePayloadBegin(super_page_base, with_quarantine);
|
||||
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionSuperPageExtentEntry<
    thread_safe>::IncrementNumberOfNonemptySlotSpans() {
#if DCHECK_IS_ON()
  char* super_page_begin = base::bits::AlignDown(
      reinterpret_cast<char*>(this), kSuperPageAlignment);
  PA_DCHECK(
      (SuperPagePayloadSize(super_page_begin, root->IsQuarantineAllowed()) /
       PartitionPageSize()) > number_of_nonempty_slot_spans);
#endif
  ++number_of_nonempty_slot_spans;
}

template <bool thread_safe>
ALWAYS_INLINE void PartitionSuperPageExtentEntry<
    thread_safe>::DecrementNumberOfNonemptySlotSpans() {
  PA_DCHECK(number_of_nonempty_slot_spans);
  --number_of_nonempty_slot_spans;
}

// Returns whether the pointer lies within a normal-bucket super page's payload
// area (i.e. area devoted to slot spans). It doesn't check whether it's within
// a valid slot span. It merely ensures it doesn't fall in a meta-data region
// that would surely never contain user data.
ALWAYS_INLINE bool IsWithinSuperPagePayload(void* ptr, bool with_quarantine) {
  PA_DCHECK(IsManagedByNormalBuckets(ptr));
  char* super_page_base = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask);
  void* payload_start = SuperPagePayloadBegin(super_page_base, with_quarantine);
  void* payload_end = SuperPagePayloadEnd(super_page_base);
  return ptr >= payload_start && ptr < payload_end;
}

// Returns the start of a slot, or nullptr if |maybe_inner_ptr| is not inside
// an existing slot span. The function may return a non-nullptr pointer even
// inside a decommitted or free slot span; it's the caller's responsibility to
// check whether the memory is actually allocated.
//
// |maybe_inner_ptr| must be within a normal-bucket super page, and more
// specifically within the payload area (i.e. area devoted to slot spans).
template <bool thread_safe>
ALWAYS_INLINE char* GetSlotStartInSuperPage(char* maybe_inner_ptr) {
#if DCHECK_IS_ON()
  PA_DCHECK(IsManagedByNormalBuckets(maybe_inner_ptr));
  char* super_page_ptr = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(maybe_inner_ptr) & kSuperPageBaseMask);
  auto* extent = reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
      PartitionSuperPageToMetadataArea(super_page_ptr));
  PA_DCHECK(IsWithinSuperPagePayload(maybe_inner_ptr,
                                     extent->root->IsQuarantineAllowed()));
#endif
  // Don't use FromSlotInnerPtr() because |is_valid| DCHECKs can fail there.
  auto* page = PartitionPage<thread_safe>::FromPtr(maybe_inner_ptr);
  if (!page->is_valid)
    return nullptr;
  page -= page->slot_span_metadata_offset;
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  // Check if the slot span is actually used and valid.
  if (!slot_span->bucket)
    return nullptr;
#if DCHECK_IS_ON()
  PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
#endif
  char* const slot_span_begin = static_cast<char*>(
      SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(slot_span));
  const ptrdiff_t ptr_offset = maybe_inner_ptr - slot_span_begin;
#if DCHECK_IS_ON()
  PA_DCHECK(0 <= ptr_offset &&
            ptr_offset < static_cast<ptrdiff_t>(
                             slot_span->bucket->get_pages_per_slot_span() *
                             PartitionPageSize()));
#endif
  // The slot span size in bytes is not necessarily a multiple of the partition
  // page size.
  if (ptr_offset >=
      static_cast<ptrdiff_t>(slot_span->bucket->get_bytes_per_span()))
    return nullptr;
  const size_t slot_size = slot_span->bucket->slot_size;
  const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
  char* const result = slot_span_begin + (slot_number * slot_size);
  PA_DCHECK(result <= maybe_inner_ptr && maybe_inner_ptr < result + slot_size);
  return result;
}
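
// [Editor's sketch, not part of the upstream Chromium source.] A worked
// example of the computation above, with hypothetical numbers: assume
// slot_size == 96 and |maybe_inner_ptr| at offset 250 from slot_span_begin.
// GetSlotNumber(250) then yields slot 2 (since 2 * 96 == 192 <= 250 < 288),
// and the returned slot start is slot_span_begin + 192. The PA_DCHECK at the
// end verifies exactly this containment:
// result <= maybe_inner_ptr < result + slot_size.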

// Converts from a pointer to the PartitionPage object (within the super
// page's metadata) into a pointer to the beginning of the slot span.
// |page| must be the first PartitionPage of the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionPage<thread_safe>::ToSlotSpanStartPtr(
    const PartitionPage* page) {
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  return SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
      &page->slot_span_metadata);
}

// Converts from a pointer inside a super page into a pointer to the
// PartitionPage object (within the super page's metadata) that describes the
// partition page where |ptr| is located. |ptr| doesn't have to be located
// within a valid (i.e. allocated) slot span, but must be within the super
// page's payload area (i.e. area devoted to slot spans).
//
// While it is generally valid for |ptr| to be in the middle of an allocation,
// care has to be taken with direct maps that span multiple super pages. This
// function's behavior is undefined if |ptr| lies in a subsequent super page.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionPage<thread_safe>::FromPtr(
    void* ptr) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
  char* super_page_ptr =
      reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);

#if DCHECK_IS_ON()
  PA_DCHECK(IsReservationStart(super_page_ptr));
  if (IsManagedByNormalBuckets(ptr)) {
    auto* extent =
        reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
            PartitionSuperPageToMetadataArea(super_page_ptr));
    PA_DCHECK(
        IsWithinSuperPagePayload(ptr, extent->root->IsQuarantineAllowed()));
  } else {
    PA_CHECK(ptr >= super_page_ptr + PartitionPageSize());
  }
#endif

  uintptr_t partition_page_index =
      (pointer_as_uint & kSuperPageOffsetMask) >> PartitionPageShift();
  // Index 0 is invalid because it is the super page extent metadata, and the
  // last index is invalid because the whole PartitionPage is set up as guard
  // pages. This repeats part of the payload PA_DCHECK above, which may check
  // for other exclusions.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  return reinterpret_cast<PartitionPage<thread_safe>*>(
      PartitionSuperPageToMetadataArea(super_page_ptr) +
      (partition_page_index << kPageMetadataShift));
}

// Converts from a pointer to the SlotSpanMetadata object (within the super
// page's metadata) into a pointer to the beginning of the slot span. This
// works on direct maps too.
template <bool thread_safe>
ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
    const SlotSpanMetadata* slot_span) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

  // A valid |page| must be past the first guard system page and within
  // the following metadata region.
  PA_DCHECK(super_page_offset > SystemPageSize());
  // Must be less than the total metadata region.
  PA_DCHECK(super_page_offset <
            SystemPageSize() +
                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
  uintptr_t partition_page_index =
      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
  // Index 0 is invalid because it is the super page extent metadata, and the
  // last index is invalid because the whole PartitionPage is set up as guard
  // pages.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
  void* ret = reinterpret_cast<void*>(
      super_page_base + (partition_page_index << PartitionPageShift()));
  return ret;
}
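
// [Editor's sketch, not part of the upstream Chromium source.] The index
// arithmetic above mirrors PartitionPage::FromPtr in reverse: the metadata
// area starts one system page into the super page, with one
// kPageMetadataSize-byte entry per partition page. Assuming a hypothetical
// 4 KiB system page and 32-byte metadata entries, a SlotSpanMetadata living
// at super-page offset 4096 + 5 * 32 has partition_page_index == 5 and maps
// back to the slot span at super_page_base + 5 * PartitionPageSize().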

// Converts from a pointer inside a slot into a pointer to the SlotSpanMetadata
// object (within the super page's metadata) that describes the slot span
// containing that slot.
//
// CAUTION! For direct-mapped allocation, |ptr| has to be within the first
// partition page.
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(void* ptr) {
  auto* page = PartitionPage<thread_safe>::FromPtr(ptr);
  PA_DCHECK(page->is_valid);
  // Partition pages in the same slot span share the same slot span metadata
  // object (located in the first PartitionPage object of that span). Adjust
  // for that.
  page -= page->slot_span_metadata_offset;
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  // For direct map, if |ptr| doesn't point within the first partition page,
  // |slot_span_metadata_offset| will be 0, |page| won't get shifted, leaving
  // |slot_size| at 0.
  PA_DCHECK(slot_span->bucket->slot_size);
  return slot_span;
}

template <bool thread_safe>
ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
  char* super_page_base = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask);
  return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
      PartitionSuperPageToMetadataArea(super_page_base));
}

// Like |FromSlotInnerPtr|, but asserts that the pointer points to the
// beginning of the slot. This works on direct maps too.
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotStartPtr(void* slot_start) {
  auto* slot_span = FromSlotInnerPtr(slot_start);
  // Checks that the pointer's offset from the slot span start is a multiple
  // of the slot size.
  auto* slot_span_start = ToSlotSpanStartPtr(slot_span);
  PA_DCHECK(!((reinterpret_cast<uintptr_t>(slot_start) -
               reinterpret_cast<uintptr_t>(slot_span_start)) %
              slot_span->bucket->slot_size));
  return slot_span;
}

template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(size_t raw_size) {
  PA_DCHECK(CanStoreRawSize());
  auto* the_next_page = reinterpret_cast<PartitionPage<thread_safe>*>(this) + 1;
  the_next_page->subsequent_page_metadata.raw_size = raw_size;
}

template <bool thread_safe>
ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
  PA_DCHECK(CanStoreRawSize());
  auto* the_next_page =
      reinterpret_cast<const PartitionPage<thread_safe>*>(this) + 1;
  return the_next_page->subsequent_page_metadata.raw_size;
}

template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
    PartitionFreelistEntry* new_head) {
  PA_DCHECK(!new_head ||
            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
                (reinterpret_cast<uintptr_t>(new_head) & kSuperPageBaseMask));
  freelist_head = new_head;
}

template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(void* slot_start)
    EXCLUSIVE_LOCKS_REQUIRED(
        PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
#if DCHECK_IS_ON()
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif

  PA_DCHECK(num_allocated_slots);
  // Catches an immediate double free.
  PA_CHECK(slot_start != freelist_head);
  // Look for double free one level deeper in debug.
  PA_DCHECK(!freelist_head ||
            slot_start != freelist_head->GetNext(bucket->slot_size));
  auto* entry = static_cast<internal::PartitionFreelistEntry*>(slot_start);
  entry->SetNext(freelist_head);
  SetFreelistHead(entry);
  --num_allocated_slots;
  if (UNLIKELY(num_allocated_slots <= 0)) {
    FreeSlowPath();
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}
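
// [Editor's sketch, not part of the upstream Chromium source.] Free() above
// is a plain LIFO push onto the slot span's freelist, plus double-free
// checks. Stripped of the checks, the hot path is just:
#if 0
  auto* entry = static_cast<internal::PartitionFreelistEntry*>(slot_start);
  entry->SetNext(freelist_head);  // New entry points at the old head.
  SetFreelistHead(entry);         // The freed slot becomes the new head.
  --num_allocated_slots;          // The slow path handles now-empty spans.
#endif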

template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  return (num_allocated_slots > 0 &&
          (freelist_head || num_unprovisioned_slots));
}

template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
  if (ret) {
    PA_DCHECK(!freelist_head);
    PA_DCHECK(!num_unprovisioned_slots);
  }
  return ret;
}

template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  return (!num_allocated_slots && freelist_head);
}

template <bool thread_safe>
ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && !freelist_head);
  if (ret) {
    PA_DCHECK(!num_unprovisioned_slots);
    PA_DCHECK(empty_cache_index == -1);
  }
  return ret;
}
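
// [Editor's note, not part of the upstream Chromium source.] The four
// predicates above partition the (non-sentinel) slot span states:
//
//   active      - some slots allocated; free or unprovisioned slots remain
//   full        - every slot allocated (no freelist, nothing unprovisioned)
//   empty       - nothing allocated, but the freelist still holds the slots
//   decommitted - nothing allocated, no freelist; backing memory released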

template <bool thread_safe>
ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
  PA_DCHECK(is_decommitted());

  num_unprovisioned_slots = bucket->get_slots_per_span();
  PA_DCHECK(num_unprovisioned_slots);

  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();

  next_slot_span = nullptr;
}

// Returns the state bitmap from a pointer within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
ALWAYS_INLINE AllocationStateMap* StateBitmapFromPointer(void* ptr) {
  PA_DCHECK(IsManagedByNormalBuckets(ptr));
  auto* super_page_base = reinterpret_cast<char*>(
      reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask);
  return SuperPageStateBitmap(super_page_base);
}

// Iterates over all slot spans in a super-page. |Callback| must return true
// if it wants to stop the iteration early.
template <bool thread_safe, typename Callback>
void IterateSlotSpans(char* super_page_base,
                      bool with_quarantine,
                      Callback callback) {
#if DCHECK_IS_ON()
  PA_DCHECK(
      !(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
  auto* extent_entry =
      reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
          PartitionSuperPageToMetadataArea(super_page_base));
  extent_entry->root->lock_.AssertAcquired();
#endif

  using Page = PartitionPage<thread_safe>;
  using SlotSpan = SlotSpanMetadata<thread_safe>;
  auto* const first_page =
      Page::FromPtr(SuperPagePayloadBegin(super_page_base, with_quarantine));
  auto* const last_page =
      Page::FromPtr(SuperPagePayloadEnd(super_page_base) - PartitionPageSize());
  Page* page;
  SlotSpan* slot_span;
  for (page = first_page; page <= last_page;) {
    PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
    if (!page->is_valid) {
      if (page->has_valid_span_after_this) {
        // The page doesn't represent a valid slot span, but there is another
        // one somewhere after this. Keep iterating to find it.
        ++page;
        continue;
      }
      // There are currently no valid spans from here on. No need to iterate
      // the rest of the super page.
      break;
    }
    slot_span = &page->slot_span_metadata;
    if (callback(slot_span))
      return;
    page += slot_span->bucket->get_pages_per_slot_span();
  }
  // Each super page must have at least one valid slot span.
  PA_DCHECK(page > first_page);
  // Just a quick check that the search ended at a valid slot span and there
  // was no unnecessary iteration over gaps afterwards.
  PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
                        slot_span->bucket->get_pages_per_slot_span());
}
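
// [Editor's sketch, not part of the upstream Chromium source.] A hypothetical
// caller of IterateSlotSpans(), counting full slot spans while holding the
// root's lock (as the DCHECKs above require). |ThreadSafe| here stands for
// the thread-safety template parameter used throughout PartitionAlloc.
#if 0
size_t CountFullSlotSpans(char* super_page_base, bool with_quarantine) {
  size_t full_spans = 0;
  IterateSlotSpans<ThreadSafe>(
      super_page_base, with_quarantine,
      [&](SlotSpanMetadata<ThreadSafe>* slot_span) {
        if (slot_span->is_full())
          ++full_spans;
        return false;  // Returning true would stop the iteration early.
      });
  return full_spans;
}
#endif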

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
242
src/base/allocator/partition_allocator/partition_ref_count.h
Normal file
@ -0,0 +1,242 @@
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_

#include <atomic>
#include <cstdint>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h"

namespace base {

namespace internal {

#if BUILDFLAG(USE_BACKUP_REF_PTR)

namespace {

[[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeOrCorruptionDetected() {
  NO_CODE_FOLDING();
  IMMEDIATE_CRASH();
}

}  // namespace

// Special-purpose atomic reference count class used by BackupRefPtrImpl.
// The least significant bit of the count is reserved for tracking the liveness
// state of an allocation: it's set when the allocation is created and cleared
// on free(). So the count can be:
//
//   1         for an allocation that is just returned from Alloc()
//   2 * k + 1 for a "live" allocation with k references
//   2 * k     for an allocation with k dangling references after Free()
//
// This protects against double-frees, as we check whether the reference count
// is odd in |ReleaseFromAllocator()|, and if not, we have a double-free.
class BASE_EXPORT PartitionRefCount {
 public:
  PartitionRefCount();

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required
  // before the memory gets freed, necessitating an acquire/release barrier
  // before freeing the memory.

  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  ALWAYS_INLINE void Acquire() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif

    PA_CHECK(count_.fetch_add(2, std::memory_order_relaxed) > 0);
  }

  // Returns true if the allocation should be reclaimed.
  ALWAYS_INLINE bool Release() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif

    if (count_.fetch_sub(2, std::memory_order_release) == 2) {
      // In most thread-safe reference count implementations, an acquire
      // barrier is required so that all changes made to an object from other
      // threads are visible to its destructor. In our case, the destructor
      // finishes before the final `Release` call, so it shouldn't be a
      // problem. However, we will keep it as a precautionary measure.
      std::atomic_thread_fence(std::memory_order_acquire);
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      // The allocation is about to get freed, so clear the cookie.
      brp_cookie_ = 0;
#endif
      return true;
    }

    return false;
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  ALWAYS_INLINE bool ReleaseFromAllocator() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif

    int32_t old_count = count_.fetch_sub(1, std::memory_order_release);
    if (UNLIKELY(!(old_count & 1)))
      DoubleFreeOrCorruptionDetected();
    if (old_count == 1) {
      std::atomic_thread_fence(std::memory_order_acquire);
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      // The allocation is about to get freed, so clear the cookie.
      brp_cookie_ = 0;
#endif
      return true;
    }

    return false;
  }

  // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
  // raw_ptr<T> references. There may be other references from raw pointers or
  // unique_ptr, but we have no way of tracking them, so we hope for the best.
  // To summarize, the function returns whether we believe the allocation can
  // be safely freed.
  ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    CheckCookie();
#endif

    return count_.load(std::memory_order_acquire) == 1;
  }

  ALWAYS_INLINE bool IsAlive() {
    bool alive = count_.load(std::memory_order_relaxed) & 1;
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    if (alive)
      CheckCookie();
#endif
    return alive;
  }

 private:
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  ALWAYS_INLINE void CheckCookie() {
    PA_CHECK(brp_cookie_ == CalculateCookie());
  }

  ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }

  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif  // DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)

  std::atomic<int32_t> count_{1};
};

ALWAYS_INLINE PartitionRefCount::PartitionRefCount()
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    : brp_cookie_(CalculateCookie())
#endif
{
}
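
// [Editor's sketch, not part of the upstream Chromium source.] The even/odd
// encoding documented above in action, for one allocation tracked by a single
// raw_ptr:
#if 0
  PartitionRefCount ref_count;  // count_ == 1: alive, no known refs.
  ref_count.Acquire();          // count_ == 3: alive, one raw_ptr reference.
  bool freed = ref_count.ReleaseFromAllocator();
  // freed == false, count_ == 2: freed by the allocator, but one dangling
  // reference still keeps the slot from being reclaimed.
  freed = ref_count.Release();
  // freed == true, count_ == 0: the last reference is gone, so the slot can
  // now actually be reclaimed.
#endif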

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(base::kAlignment % alignof(PartitionRefCount) == 0,
              "kAlignment must be a multiple of alignof(PartitionRefCount).");

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;

constexpr size_t kPartitionRefCountIndexMultiplier =
    SystemPageSize() /
    (sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()));

static_assert((sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()) *
                   kPartitionRefCountIndexMultiplier <=
               SystemPageSize()),
              "The PartitionRefCount table size must be smaller than or "
              "equal to SystemPageSize().");

ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
  uintptr_t slot_start_as_uintptr = reinterpret_cast<uintptr_t>(slot_start);
  if (LIKELY(slot_start_as_uintptr & SystemPageOffsetMask())) {
    uintptr_t refcount_ptr_as_uintptr =
        slot_start_as_uintptr - sizeof(PartitionRefCount);
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_ptr_as_uintptr % alignof(PartitionRefCount) == 0);
#endif
    return reinterpret_cast<PartitionRefCount*>(refcount_ptr_as_uintptr);
  } else {
    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
        (slot_start_as_uintptr & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index =
        ((slot_start_as_uintptr & kSuperPageOffsetMask) >> SystemPageShift()) *
        kPartitionRefCountIndexMultiplier;
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
#endif
    return bitmap_base + index;
  }
}
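
// [Editor's note, not part of the upstream Chromium source.] The branch above
// distinguishes two placements of the ref-count: a slot that starts
// mid-system-page (the common, LIKELY case) keeps its ref-count directly
// before slot_start, in the tail of the previous slot. A slot that starts
// exactly on a system page boundary has no usable byte before it (the
// preceding page may be a guard page or belong to another span), so its
// ref-count instead lives in a per-super-page table in the metadata region,
// indexed by the slot's system page number.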

#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = base::kAlignment;
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;

// This is for adjusting pointers right past the allocation, which may point
// to the next slot. First subtract 1 to bring them to the intended slot, and
// only then can the ref-count of that slot be found.
constexpr size_t kPartitionPastAllocationAdjustment = 1;

ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
  return reinterpret_cast<PartitionRefCount*>(slot_start);
}

#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
              "PartitionRefCount should fit into the in-slot buffer.");

#else  // BUILDFLAG(USE_BACKUP_REF_PTR)

static constexpr size_t kInSlotRefCountBufferSize = 0;
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;

#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_