Compare commits


No commits in common. "900f316fde0c9b0d6aa08750b955b64bc23da9bf" and "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" have entirely different histories.

1383 changed files with 25079 additions and 25841 deletions

View File

@ -1 +1 @@
113.0.5672.62
112.0.5615.49

View File

@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [SagerNet](https://github.com/SagerNet/SagerNet)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome.

View File

@ -10,7 +10,6 @@ Standard: Cpp11
# TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
# the Chromium style (is implied by BasedOnStyle: Chromium).
InsertBraces: true
InsertNewlineAtEOF: true
# Make sure code like:
# IPC_BEGIN_MESSAGE_MAP()

View File

@ -39,7 +39,6 @@ Aditya Bhargava <heuristicist@gmail.com>
Adrian Belgun <adrian.belgun@intel.com>
Adrian Ratiu <adrian.ratiu@collabora.corp-partner.google.com>
Adrià Vilanova Martínez <me@avm99963.com>
Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com>
@ -139,7 +138,6 @@ Arnaud Mandy <arnaud.mandy@intel.com>
Arnaud Renevier <a.renevier@samsung.com>
Arpita Bahuguna <a.bah@samsung.com>
Arthur Lussos <developer0420@gmail.com>
Artin Lindqvist <artin.lindqvist.chromium@gmail.com>
Artur Akerberg <artur.aker@gmail.com>
Arun Kulkarni <kulkarni.a@samsung.com>
Arun Kumar <arun87.kumar@samsung.com>
@ -237,7 +235,6 @@ Cheng Zhao <zcbenz@gmail.com>
Cheng Yu <yuzichengcode@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Choudhury M. Shamsujjoha <choudhury.s@samsung.com>
Chris Dalton <chris@rive.app>
Chris Greene <cwgreene@amazon.com>
Chris Harrelson <chrishtr@gmail.com>
Chris Nardi <hichris123@gmail.com>
@ -282,7 +279,6 @@ Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com>
Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com>
Darshan Sen <raisinten@gmail.com>
@ -304,7 +300,6 @@ David Sanders <dsanders11@ucsbalum.com>
David Spellman <dspell@amazon.com>
David Valachovic <adenflorian@gmail.com>
Dax Kelson <dkelson@gurulabs.com>
Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com>
Debug Wang <debugwang@tencent.com>
Deepak Dilip Borade <deepak.db@samsung.com>
@ -469,7 +464,6 @@ Horia Olaru <olaru@adobe.com>
Hosung You <hosung.you@samsung.com>
Huapeng Li <huapengl@amazon.com>
Huayong Xu <huayong.xu@samsung.com>
Hung Ngo <ngotienhung195@gmail.com>
Hugo Holgersson <hugo.holgersson@sonymobile.com>
Hui Wang <wanghui07050707@gmail.com>
Hui Wang <wanghui210@huawei.com>
@ -509,7 +503,6 @@ Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com>
Jack Bates <jack@nottheoilrig.com>
Jackson Loeffler <j@jloeffler.com>
Jacky Hu <flameddd@gmail.com>
Jacob Clark <jacob.jh.clark@googlemail.com>
Jacob Mandelson <jacob@mandelson.org>
@ -577,7 +570,6 @@ Jiangzhen Hou <houjiangzhen@360.cn>
Jianjun Zhu <jianjun.zhu@intel.com>
Jianneng Zhong <muzuiget@gmail.com>
Jiawei Shao <jiawei.shao@intel.com>
Jiawei Chen <jiawei.chen@dolby.com>
Jiaxun Wei <leuisken@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
Jidong Qin <qinjidong@qianxin.com>
@ -610,7 +602,6 @@ Joe Thomas <mhx348@motorola.com>
Joel Stanley <joel@jms.id.au>
Joey Jiao <joeyjiao0810@gmail.com>
Joey Mou <joeymou@amazon.com>
Johann <johann@duck.com>
Johannes Rudolph <johannes.rudolph@googlemail.com>
John Ingve Olsen <johningveolsen@gmail.com>
John Kleinschmidt <kleinschmidtorama@gmail.com>
@ -752,8 +743,6 @@ Leon Han <leon.han@intel.com>
Leung Wing Chung <lwchkg@gmail.com>
Li Yanbo <liyanbo.monster@bytedance.com>
Li Yin <li.yin@intel.com>
Lian Ruilong <lianrl@dingdao.com>
Lian Ruilong <lianruilong1108@gmail.com>
Lidwine Genevet <lgenevet@cisco.com>
Lin Sun <lin.sun@intel.com>
Lin Peng <penglin220@gmail.com>
@ -775,7 +764,7 @@ Luka Dojcilovic <l.dojcilovic@gmail.com>
Lukas Lihotzki <lukas@lihotzki.de>
Lukasz Krakowiak <lukasz.krakowiak@mobica.com>
Luke Inman-Semerau <luke.semerau@gmail.com>
Luke Gu <gulukesh@gmail.com>
Luke Seunghoe Gu <gulukesh@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Luoxi Pan <l.panpax@gmail.com>
Lu Yahan <yahan@iscas.ac.cn>
@ -797,7 +786,6 @@ Manuel Lagana <manuel.lagana.dev@gmail.com>
Mao Yujie <maojie0924@gmail.com>
Mao Yujie <yujie.mao@intel.com>
Marc des Garets <marc.desgarets@googlemail.com>
Marcio Caroso <msscaroso@gmail.com>
Marcin Wiacek <marcin@mwiacek.com>
Marco Rodrigues <gothicx@gmail.com>
Marcos Caceres <marcos@marcosc.com>
@ -1302,7 +1290,6 @@ Vinoth Chandar <vinoth@uber.com>
Vipul Bhasin <vipul.bhasin@gmail.com>
Visa Putkinen <v.putkinen@partner.samsung.com>
Vishal Bhatnagar <vishal.b@samsung.com>
Vishal Lingam <vishal.reddy@samsung.com>
Vitaliy Kharin <kvserr@gmail.com>
Vivek Galatage <vivek.vg@samsung.com>
Volker Sorge <volker.sorge@gmail.com>
@ -1317,7 +1304,6 @@ Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com>
Wesley Lancel <wesleylancel@gmail.com>
Wei Wang <wei4.wang@intel.com>
Wei Wen <wenwei.wenwei@bytedance.com>
Wesley Wigham <wwigham@gmail.com>
Will Cohen <wwcohen@gmail.com>
Will Hirsch <chromium@willhirsch.co.uk>

View File

@ -33,24 +33,37 @@ if (is_official_build) {
assert(!is_component_build)
}
# The `gn_all` target is used to list all of the main targets in the build, so
# that we can figure out which BUILD.gn files to process, following the process
# described at the top of this file.
# This file defines the following two main targets:
#
# Because of the way GN works (again, as described above), there may be targets
# built by `all` that aren't built by `gn_all`. We always want `all` to build,
# so there's really never a reason you'd want to build `gn_all` instead of
# `all`, and no tooling should depend directly on this target. Tools should
# depend on either an explicit list of targets, or `all`.
# "gn_all" is used to create explicit dependencies from the root BUILD.gn to
# each top-level component that we wish to include when building everything via
# "all". This is required since the set of targets built by "all" is determined
# automatically based on reachability from the root BUILD.gn (for details, see
# crbug.com/503241). Builders should typically use "all", or list targets
# explicitly, rather than relying on "gn_all".
#
# "gn_visibility": targets that are normally not visible to top-level targets,
# but are built anyway by "all". Since we don't want any such targets, we have
# this placeholder to make sure hidden targets that aren't otherwise depended
# on yet are accounted for.
group("gn_all") {
testonly = true
deps = [
":gn_visibility",
"//net",
]
}
group("gn_visibility") {
deps = [
"//build/config/sanitizers:options_sources",
# "//third_party/pdfium:pdfium_embeddertests", # TODO(GYP): visibility?
# "//third_party/pdfium:pdfium_unittests", # TODO(GYP): visibility?
]
}
if (is_android) {
group("optimize_gn_gen") {
deps = [

src/DEPS
View File

@ -38,6 +38,7 @@ gclient_gn_args = [
'checkout_android',
'checkout_android_prebuilts_build_tools',
'checkout_android_native_support',
'checkout_google_benchmark',
'checkout_ios_webkit',
'checkout_nacl',
'checkout_openxr',
@ -97,8 +98,10 @@ vars = {
# restricted to Googlers only.
'checkout_chromium_password_manager_test_dependencies': False,
# Checkout fuzz archive. Should not be needed on builders.
'checkout_clusterfuzz_data': False,
# By default, do not check out Google Benchmark. The library is only used by a
# few specialized benchmarks that most developers do not interact with. Will
# be overridden by gclient variables.
'checkout_google_benchmark': False,
# By default, checkout JavaScript coverage node modules. These packages
# are used to post-process raw v8 coverage reports into IstanbulJS compliant
@ -133,8 +136,11 @@ vars = {
# Fetch clang-tidy into the same bin/ directory as our clang binary.
'checkout_clang_tidy': False,
# Fetch clang libraries and headers in order to build clang tooling.
'checkout_clang_libs': False,
# Fetch clang libraries and headers in order to build clang tooling. This is
# required to build C++-Rust interop codegen tools. This may break things that
# use it when clang rolls, and is meant for prototyping. You should talk to
# tools/clang/OWNERS before depending on it.
'checkout_clang_libs': 'use_rust',
# Fetch clangd into the same bin/ directory as our clang binary.
'checkout_clangd': False,
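
Several of the vars in the hunks above note that they "will be overridden by gclient variables" (for example checkout_google_benchmark and checkout_clang_libs). In practice they are flipped from a developer's .gclient file rather than by editing DEPS. A minimal sketch of such an override, assuming a standard Chromium-style solution named "src"; the URL and the particular values here are illustrative, not taken from this diff:

# .gclient -- hypothetical developer config; gclient reads this file as Python.
solutions = [
  {
    "name": "src",
    "url": "https://chromium.googlesource.com/chromium/src.git",
    "managed": False,
    "custom_deps": {},
    "custom_vars": {
      # Override the DEPS defaults shown in the hunks above.
      "checkout_google_benchmark": True,
      "checkout_clang_libs": True,
    },
  },
]

Running `gclient sync` afterwards re-evaluates the DEPS conditions and hooks with these values.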
@ -150,7 +156,7 @@ vars = {
# ..._rs_api_impl.cc that are generated by prebuilt (see
# tools/rust/build_crubit.py) Crubit tools during Chromium build (see
# also //build/rust/rs_bindings_from_cc.gni).
'checkout_crubit': False,
'checkout_crubit': 'use_rust',
# By default checkout the OpenXR loader library only on Windows. The OpenXR
# backend for VR in Chromium is currently only supported for Windows, but
@ -223,7 +229,7 @@ vars = {
#
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '15380.0.0',
'lacros_sdk_version': '15357.0.0',
# Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates
@ -235,7 +241,7 @@ vars = {
# luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:320bf3ed60cd4d24549d0ea9ee3a94394f2665ce',
'luci_go': 'git_revision:8a8b4f2ea65c7ff5fde8a0c522008aed78d42d9d',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision.
@ -249,10 +255,7 @@ vars = {
# We avoid doing this on toolchain build bots (where
# `checkout_rust_toolchain_deps` is set) since they are building the Rust
# toolchain.
'checkout_rust': 'host_os == "linux" and not checkout_rust_toolchain_deps',
# Fetch the Android team's Rust toolchain.
'fetch_android_chromium_rust_toolchain': False,
'fetch_prebuilt_chromium_rust_toolchain': 'use_rust and host_os == "linux" and not checkout_rust_toolchain_deps',
# Build in-tree Rust toolchain. checkout_clang_libs must also be True. The
# corresponding GN arg use_chromium_rust_toolchain directs the build to use
@ -272,28 +275,20 @@ vars = {
# Make Dawn skip its standalone dependencies
'dawn_standalone': False,
# Fetch configuration files required for the 'use_remoteexec' gn arg
'download_remoteexec_cfg': False,
# RBE instance to use for running remote builds
'rbe_instance': Str('projects/rbe-chrome-untrusted/instances/default_instance'),
# RBE project to download rewrapper config files for. Only needed if
# different from the project used in 'rbe_instance'
'rewrapper_cfg_project': Str(''),
# reclient CIPD package
'reclient_package': 'infra/rbe/client/',
# reclient CIPD package version
'reclient_version': 're_client_version:0.99.0.3f95625-gomaip',
'reclient_version': 're_client_version:0.96.2.d36a87c-gomaip',
# Fetch Rust-related packages.
'use_rust': False,
# Fetch dependencies needed to build Rust toolchain. Not needed if developing
# Rust code in Chromium; instead enable checkout_rust
# (which is gradually being made the default across different platforms).
# Only use if building the Rust toolchain.
# Rust code in Chromium; instead enable use_rust. Only use if building the
# Rust toolchain.
'checkout_rust_toolchain_deps': False,
'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com',
'boringssl_git': 'https://boringssl.googlesource.com',
'chrome_git': 'https://chrome-internal.googlesource.com',
'chromium_git': 'https://chromium.googlesource.com',
'dawn_git': 'https://dawn.googlesource.com',
'pdfium_git': 'https://pdfium.googlesource.com',
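
The boolean vars above (use_rust, checkout_rust_toolchain_deps, plus the host_os value supplied by gclient itself) only take effect through the condition strings attached to deps and hooks later in this file, such as 'use_rust and host_os == "linux" and not checkout_rust_toolchain_deps'. A rough Python approximation of how such a condition is evaluated against the vars; gclient uses its own condition parser, so this is only an illustrative sketch:

# Illustrative only: gclient's real evaluator is not Python's eval().
condition = 'use_rust and host_os == "linux" and not checkout_rust_toolchain_deps'

variables = {
    'use_rust': False,                      # default in this revision of DEPS
    'checkout_rust_toolchain_deps': False,
    'host_os': 'linux',                     # filled in by gclient at sync time
}

fetch_prebuilt_rust_toolchain = eval(condition, {'__builtins__': {}}, variables)
print(fetch_prebuilt_rust_toolchain)        # False until use_rust is overridden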
@ -304,34 +299,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': '1195e70d671947af02a6a5b0ddc65806b9645252',
'skia_revision': 'f5fefe5245098be43cb608eace5e14d67cdc09e6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': '0e75d85d8e3467a536bca01d89d8a180a8bcffca',
'v8_revision': '96fed67922e5f54a027aed80259e5083769e33e2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': 'c2efe6fcdbc548c5ba6c1fa9c5415e256c7211ba',
'angle_revision': '293db5ce4d0766cb3ba7711057a00f0a5bddb00d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': 'c85d70d97009a264fc5e7747316743a1abac5f67',
'swiftshader_revision': '3575b5479af54e471ea6750a8585e2c9bc87801c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': '7c9b2b33ac5759b0443d8f6e01f07432ff034c12',
'pdfium_revision': '4090d4c0f9873f5f50b630c26c2439b5297a6e49',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '74646566e93de7551bfdfc5f49de7462f13d1d05',
'boringssl_revision': 'ca1690e221677cea3fb946f324eb89d846ec53f2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:12.20230323.1.1',
'fuchsia_version': 'version:11.20230223.1.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@ -351,11 +346,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other.
'nacl_revision': 'd2f043255597fe84888bbece2909f342f39b36c6',
'nacl_revision': '417b413dbe94a861ee050d42daf945cca02dba11',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': '4d8db130ea4342317581bab65fc96365ce806b77',
'freetype_revision': '3f01161ff22c84c371b6dc3b5e0351e0d6e8e771',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
@ -375,11 +370,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': 'fb540cd80e69889f9d241a5b1946d9147c3f6c20',
'catapult_revision': '4274fe29dac91b7713244daebb6f1d2364d97193',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling CrossBench
# and whatever else without interference from each other.
'crossbench_revision': '0941f19d9b1bab30137d9fcad6ee2ee44d338913',
'crossbench_revision': '27639d495e1cec411073bc82ba1fe368ce0ca89a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
@ -387,11 +382,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling centipede
# and whatever else without interference from each other.
'centipede_revision': 'a5a9071410e6e8134855b88461d0eb2c77d48cdd',
'centipede_revision': '63ed43d2bfa2c04e291e81d643a5581c9be90c53',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': 'e24710d614f3f16412444484e1f64b4439b965d8',
'devtools_frontend_revision': '2436ae2c5444ba8008a9f092301209a87aef0483',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
@ -431,11 +426,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': '069e2441956bc1d74670d2faa746534601c683c3',
'dawn_revision': 'de24841411c4cfb13662238327f2e456c82d26f6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': '7e67f01d296f9a57f63ca41af665f7b354078772',
'quiche_revision': '40c87d454d762f3daaeca334cd2dc962f0476b13',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@ -451,11 +446,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other.
'libavif_revision': '129a59047ea2b31f21109b4ed07dde32662f1b4c',
'libavif_revision': '5d16f1f430c7053f9ea75ea33897df0dc2d2c847',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': '3f1b0c81e3c441f89b94843a60461677972f163e',
'nearby_revision': 'd260feced56cfdea53f941883c250d635ed6064d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@ -467,7 +462,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'cros_components_revision': '0631a9fae2b5f424dd1f938ba03a422b22755c90',
'cros_components_revision': 'fb2448fc618b4e3634c8c4097b4a84fcfa34c540',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@ -475,11 +470,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': '9643f2cf13d6935a84a30b7da7de53327733e190',
'libcxxabi_revision': '87d8fe050bedb143f232b9ff99a0a46897640e5d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': '29a6dda8c6588ba4abeafdb21be531e757983e31',
'libunwind_revision': 'c5e861c7b48ee121d3719b7b5c14175c47ec5cc9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@ -487,18 +482,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'highway_revision': '8f20644eca693cfb74aa795b0006b6779c370e7a',
'highway_revision': '58746ca5b9f9444a2a3549704602ecc6239f8f41',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': 'a51c75b09b3f54ab2d3efe583dcc89ba84d24c0d',
'ffmpeg_revision': 'ee0c52d52036ecadfd38affec86c04937480bedb',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': '5622befaf8a9d539bc94c9f1341b8e76065334db',
'libcxx_revision': 'e136ec5032a5e5d97e988ce66e8c269a80ff54c4',
# GN CIPD package version.
'gn_version': 'git_revision:41fef642de70ecdcaaa26be96d56a0398f95abd4',
'gn_version': 'git_revision:b25a2f8c2d33f02082f0f258350f5e22c0973108',
# ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@ -573,7 +568,7 @@ deps = {
'src/buildtools/reclient': {
'packages': [
{
'package': Var('reclient_package') + '${{platform}}',
'package': 'infra/rbe/client/${{platform}}',
'version': Var('reclient_version'),
}
],
@ -587,7 +582,8 @@ deps = {
},
],
'dep_type': 'cipd',
'condition': 'fetch_android_chromium_rust_toolchain',
# TODO(https://crbug.com/1292038): gate this on use_rust as well as host_os.
'condition': 'host_os == "linux"',
},
# We don't know target_cpu at deps time. At least until there's a universal
@ -625,22 +621,11 @@ deps = {
'condition': '(host_os == "linux")',
},
'src/third_party/aosp_dalvik': {
'packages': [
{
'package': 'chromium/third_party/aosp_dalvik/linux-amd64',
'version': 'version:2@13.0.0_r24.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/android_webview/tools/cts_archive': {
'packages': [
{
'package': 'chromium/android_webview/tools/cts_archive',
'version': 'D9ziv4Z61qHwterMTFEoPeCZzFNyUfMnWAlYuxEKlM8C',
'version': 'APYMYksv9eNAkU6osFvNXN38pMO1Q1kScjeecePr7NgC',
},
],
'condition': 'checkout_android',
@ -648,7 +633,7 @@ deps = {
},
'src/chrome/browser/resources/preinstalled_web_apps/internal': {
'url': Var('chrome_git') + '/chrome/components/default_apps.git' + '@' + '3610b316398d68e3ae89388c03cb8bd8eb30e76a',
'url': 'https://chrome-internal.googlesource.com/chrome/components/default_apps.git' + '@' + '3610b316398d68e3ae89388c03cb8bd8eb30e76a',
'condition': 'checkout_src_internal',
},
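
This entry, and several others below (src/clank, src-internal, the captured-sites test data, devtools-internal, barhopper), swap Var('chrome_git') for a literal https://chrome-internal.googlesource.com URL. The two spellings resolve to the same string, since Var() simply looks names up in the vars dict defined earlier, which sets 'chrome_git' to that host. A small illustrative sketch with a stand-in Var() helper:

# Stand-in for gclient's Var() lookup; the value matches the vars block above.
vars = {
    'chrome_git': 'https://chrome-internal.googlesource.com',
}

def Var(name):
    return vars[name]

old_style = (Var('chrome_git') + '/chrome/components/default_apps.git'
             + '@' + '3610b316398d68e3ae89388c03cb8bd8eb30e76a')
new_style = ('https://chrome-internal.googlesource.com/chrome/components/default_apps.git'
             + '@' + '3610b316398d68e3ae89388c03cb8bd8eb30e76a')
assert old_style == new_style  # identical resolved dependency URL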
@ -657,28 +642,17 @@ deps = {
'condition': 'checkout_mac',
},
'src/third_party/updater/chrome_linux64': {
'dep_type': 'cipd',
'condition': 'checkout_linux',
'packages': [
{
'package': 'chromium/third_party/updater/chrome_linux64',
'version': 'iSlHWT_rV1izyFEkrQSJUVMZ_Z_8yUezA1tZGxQGsyQC',
},
],
},
'src/third_party/updater/chrome_mac_universal': {
'dep_type': 'cipd',
'condition': 'checkout_mac',
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': '7VhToYn-E3LXpYnm9s9jYdxNXke02DtWaJDqYrYRnNUC',
'version': 'u1XGTm7703jO-Ax33P8j-x_KijOeHd36aBA6SO8V3a8C',
},
],
},
},
'src/third_party/updater/chrome_mac_universal_prod': {
'dep_type': 'cipd',
'condition': 'checkout_mac',
@ -696,7 +670,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'tZsZTfw7VWDV34gVY9dbe0GfdIpzyEiI8OixEeNiZa8C',
'version': 'RmZn_R0BOPSbruD15DEq9pfu5IhhtjoJX6z-ufrWnD4C',
},
],
},
@ -707,7 +681,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'WmP4Vc7qJey93eEtaErCNDpQisABBTDHw4TWPOJyFU4C',
'version': 'AAes70A2b8-CLhU1h9Sh1x2K-N3WjG7j2Tlp6VOgmnQC',
},
],
},
@ -719,7 +693,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': '181zBl1aAUqG8duH_W8VOA1eT009_B8SX-Wevln3sI4C',
'version': '5ui7_fqpvI7a8omWqM8iyD0PZFPJpYKoMHkAb6xA_TkC',
},
],
},
@ -731,7 +705,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'BdTuSFp4NP_JTwW5UBQtYiCIECiizKxOXuOvJBrkzuMC',
'version': '0KnizXQ2_n_V3aEHR240LqMKw7b0-Pm9VBUmVuQh0cAC',
},
],
},
@ -742,7 +716,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': '7axlCTeGu9zVyvMFjl_VGHdOeChRxuMVUg9lJB0t0y0C',
'version': 'g_24x4tVzQIoveectPGIXIGc2ptYDTnOodXieF_OG_4C',
},
],
},
@ -753,18 +727,18 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'bg3EWZJ5FdbYY7I949RV5oo782KRGxW4c9Qx2QHVW5oC',
'version': 's6U9lpJZsILIKkP5bCc_QqvJjPW2MwMWg5IoCBt_YEYC',
},
],
},
'src/chrome/test/data/autofill/captured_sites/artifacts': {
'url': Var('chrome_git') + '/chrome/test/captured_sites/autofill.git' + '@' + 'a38d810c87a18582e986b94650d9cfa4b67be12c',
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + 'a38d810c87a18582e986b94650d9cfa4b67be12c',
'condition': 'checkout_chromium_autofill_test_dependencies',
},
'src/chrome/test/data/password/captured_sites/artifacts': {
'url': Var('chrome_git') + '/chrome/test/captured_sites/password.git' + '@' + '04b3ea663adf745c52831650e2426b54bc94e65d',
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '04b3ea663adf745c52831650e2426b54bc94e65d',
'condition': 'checkout_chromium_password_manager_test_dependencies',
},
@ -789,17 +763,17 @@ deps = {
Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
'src/clank': {
'url': Var('chrome_git') + '/clank/internal/apps.git' + '@' +
'6275e53840a0ff9ce76ec3205cf65a6e4db38a99',
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'd83811f32343245218e742319cec89aaefb94657',
'condition': 'checkout_android and checkout_src_internal',
},
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '049c271a3726ba530df735240bddd8cc5d63ba14',
'url': Var('chromium_git') + '/website.git' + '@' + '182a630499559e1403237d14e2bc6302d384ed39',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '740b64276df73d896a9db6011a3515d20eac30cc',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '8ac47627cb9ba09bf4bc3489b7aff5d77cd6eb88',
'condition': 'checkout_ios',
},
@ -819,7 +793,7 @@ deps = {
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'ba1e10d39bb1fa6086410c0a86d6b06604d082f5',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'a407ef3ac3220882732e701804613c44704ebf78',
'condition': 'checkout_ios',
},
@ -889,7 +863,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'HwyxrY2NcW8Ji7-YLiyZCRwd0EcBDtBITwoLgKCZyDYC',
'version': 'E7vzLhZk6xAJnnioidm0-d5a4cz1OxOr0LJUsCkKKJ0C',
},
],
'dep_type': 'cipd',
@ -900,7 +874,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'GXdygJ5Lqx05fwk4gzmN04ydfOcnrBySGlK_fc5s2pcC',
'version': '4wYh3p2y6ATe5OeiGmtl-G9thdrKGoX5DHzaP8V_tecC',
},
],
'dep_type': 'cipd',
@ -911,7 +885,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'sJ-03ctmnf-O_OKaRdFJ2es8jg1DIs96Vv4tMNx6t58C',
'version': 'E5Y3kcrVZt1PybXoGxTDRb_KmswZX_5jBphOaHwm2fQC',
},
],
'dep_type': 'cipd',
@ -979,7 +953,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': 'Bd5GuYdu-KY7DAsP-QU0XofyaFYU7ySZX8QQpca7j3sC',
'version': 'zEg6hTXAR6Mcqyfh8gHDzD9fmyy1xwz4swj6pkENIYsC',
},
],
'condition': 'checkout_android',
@ -987,7 +961,7 @@ deps = {
},
'src/third_party/androidx_javascriptengine/src': {
'url': Var('chromium_git') + '/aosp/platform/frameworks/support/javascriptengine/javascriptengine/src.git' + '@' + '8fc6d80d8f5fcb120f9a5074bbf2e4df02e23d9f',
'url': Var('chromium_git') + '/aosp/platform/frameworks/support/javascriptengine/javascriptengine/src.git' + '@' + '1f37b986ae4da1f48d55fdd32ed465c6f3659c30',
'condition': 'checkout_android',
},
@ -1050,7 +1024,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'E86vFKi4re9HwIfUW9yq_Ig_hc7Vr0lcl-bO3BtPQLYC',
'version': 'MSpv-kFDDSPO0SY0dLdHegUJcJT1Yy8cL9r3vlAZ9vkC',
},
],
'condition': 'checkout_android',
@ -1061,7 +1035,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': '_aoHU11YhUwqKZXVXsn5otnhI-ZVGFT7h1Z9eCcAZM0C',
'version': 'EbRaK62t9grqlZqL-JTd_zwM4t1u9fm1x4c2rLE0cqQC',
},
],
'condition': 'checkout_android',
@ -1116,10 +1090,10 @@ deps = {
Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
'src/third_party/content_analysis_sdk/src':
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'dbcaea90641fab03fb7f157a200f53cf148e4542',
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '3d3f3d6f27288d7b0628ae5259238162c5e5ae76',
'src/third_party/dav1d/libdav1d':
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + 'd426d1c91075b9c552b12dd052af1cd0368f05a2',
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + 'ed63a7459376a21e88b871006574dc2055a2ea35',
'src/third_party/dawn':
Var('dawn_git') + '/dawn.git' + '@' + Var('dawn_revision'),
@ -1133,12 +1107,14 @@ deps = {
},
'src/third_party/barhopper': {
'url': Var('chrome_git') + '/chrome/deps/barhopper.git' + '@' + '865bd06ef4a839b0a15d17e38e25f8911e4cdf9f',
'url': 'https://chrome-internal.googlesource.com/chrome/deps/barhopper.git' + '@' + '865bd06ef4a839b0a15d17e38e25f8911e4cdf9f',
'condition': 'checkout_src_internal and checkout_chromeos',
},
'src/third_party/google_benchmark/src':
Var('chromium_git') + '/external/github.com/google/benchmark.git' + '@' + 'efc89f0b524780b1994d5dddd83a92718e5be492',
'src/third_party/google_benchmark/src': {
'url': Var('chromium_git') + '/external/github.com/google/benchmark.git' + '@' + 'f730846b0a3c0dc0699978846fb14ffb2fad0bdc',
'condition': 'checkout_google_benchmark',
},
'src/third_party/boringssl/src':
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
@ -1198,7 +1174,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '324dfc74b569a67598de148e85b2eb833bda1ecb',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '52efbfdc210624f1895d5994149f53c3a4457f29',
'condition': 'checkout_chromeos',
},
@ -1206,7 +1182,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/cld_3.git' + '@' + 'b48dc46512566f5a2d41118c8c1116c4f96dc661',
'src/third_party/colorama/src':
Var('chromium_git') + '/external/colorama.git' + '@' + '3de9f013df4b470069d03d250224062e8cf15c49',
Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'src/third_party/cpuinfo/src':
Var('chromium_git') + '/external/github.com/pytorch/cpuinfo.git' + '@' + 'beb46ca0319882f262e682dd596880c92830687f',
@ -1216,7 +1192,7 @@ deps = {
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '445f3d2fc9a681fafa5ea6657dc18b11be470306',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'ffb88930c81ef7f7026a2433c424d8b3658580d4',
'condition': 'checkout_linux',
},
@ -1230,13 +1206,13 @@ deps = {
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'aa3d37f8c29c170566c26a2eedeefb96c62b9dd3',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a6898e71abf374d699ebaa121312e89bad8f100a',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': {
'url': Var('chrome_git') + '/devtools/devtools-internal.git' + '@' + 'a3167df0ed0e1e919d5e42bf0eb6155a5f83795e',
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '2ac32de43d557d678de46fb7cfc850187f3379fd',
'condition': 'checkout_src_internal',
},
@ -1244,7 +1220,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'f02856c6406cd36b5d7f1b98e27d85df5af08d06',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '3460f3558e7b469efb8a225894e21929c8c77629',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
@ -1255,7 +1231,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/espresso',
'version': '5LoBT0j383h_4dXbnap7gnNQMtMjpbMJD1JaGIYNj-IC',
'version': 'y8fIfH8Leo2cPm7iGCYnBxZpwOlgLv8rm2mlcmJlvGsC',
},
],
'condition': 'checkout_android',
@ -1287,7 +1263,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',
'src/third_party/grpc/src': {
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + '822dab21d9995c5cf942476b35ca12a1aa9d2737',
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + 'a017e9b7f20743c69627b94d7d101e4e6baadb44',
},
'src/third_party/freetype/src':
@ -1396,7 +1372,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'e3b6a4c334b9cdbe2d54476c7ac4eb98900d1144',
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'faf22e66ceafad90f5cafbd6707055c24646adcc',
'src/third_party/icu4j': {
'packages': [
@ -1463,7 +1439,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/kotlin_stdlib',
'version': 'XON2v801ZWS7FjApXO8Ev7Me7cOsIAnmqzyCXJuMwJ0C',
'version': 'Mg7371mEUwDQH4_z29HdWqYWVlXN6t2dXX0kIutg_SwC',
},
],
'condition': 'checkout_android',
@ -1494,7 +1470,7 @@ deps = {
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '16e24831397a22504541e8ec2674e3cf219e0ac5',
Var('aomedia_git') + '/aom.git' + '@' + '70b12695e1967d9589dd15b345a039e575e8f429',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@ -1508,7 +1484,7 @@ deps = {
'src/third_party/ukey2/src':
Var('chromium_git') + '/external/github.com/google/ukey2.git' + '@' + Var('ukey2_revision'),
'src/third_party/cros-components/src':
'src/third_party/cros_components':
Var('chromium_git') + '/external/google3/cros_components.git' + '@' + Var('cros_components_revision'),
# Userspace interface to kernel DRM services.
@ -1522,12 +1498,12 @@ deps = {
# The library for IPP protocol (Chrome OS).
'src/third_party/libipp/libipp': {
'url': Var('chromium_git') + '/chromiumos/platform2/libipp.git' + '@' + '2209bb84a8e122dab7c02fe66cc61a7b42873d7f',
'url': Var('chromium_git') + '/chromiumos/platform2/libipp.git' + '@' + '6c45a4f3a05cb5dd700414fe4d94cf685159d3ce',
'condition': 'checkout_linux',
},
'src/third_party/libjpeg_turbo':
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'aa4075f116e4312537d0d3e9dbd5e31096539f94',
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '0b6e6a152242c2fa30ffb29633d1b7d7b229b46b',
'src/third_party/liblouis/src': {
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
@ -1555,7 +1531,7 @@ deps = {
},
'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + '394de691a0ef570fc49943f565ad53ee0d22a7f3',
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'db69ce6aea278bee88668fd9cc2af2e544516fdb',
'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
@ -1564,7 +1540,7 @@ deps = {
Var('chromium_git') + '/webm/libwebp.git' + '@' + 'fd7b5d48464475408d32d2611bdb6947d4246b97',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '464c51a0353c71f08fe45f683d6a97a638d47833',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '2bdc210be9eb11ded16bf3ef1f6cadb0d4dcb0c2',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@ -1631,6 +1607,12 @@ deps = {
'condition': 'checkout_linux',
},
# Minizip library. Used on Chrome OS.
'src/third_party/minizip/src': {
'url': Var('chromium_git') + '/external/github.com/nmoinvaz/minizip' + '@' + '1ff40343b55e738d941abb51c70eddb803db16e2',
'condition': 'checkout_linux',
},
'src/third_party/mockito/src': {
'url': Var('chromium_git') + '/external/mockito/mockito.git' + '@' + '04a2a289a4222f80ad20717c25144981210d2eac',
'condition': 'checkout_android',
@ -1685,7 +1667,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '9fa061bbb71807041927af9952aae249014160f1',
Var('chromium_git') + '/openscreen' + '@' + '5d694418bc76f66463f06ce141c375062b0ba3b0',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@ -1702,7 +1684,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + 'bfdb3fc5cc0fc2ea9324caef920ceb30c1e3d9f5',
Var('android_git') + '/platform/external/perfetto.git' + '@' + '4bda78645d1d23a98473b793bc532a3ebff6c7f9',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@ -1736,13 +1718,13 @@ deps = {
},
'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '11073deb73b3d01018308863c0bcdfd0d51d3e70',
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '3a8436ac436124a57a4e22d5c8713a2d42b381d7',
'src/third_party/r8': {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'EGf7RQo3stt-vPTw69TaMGuNtnOx0Dbk1O-MBquwswYC',
'version': 'HGbnG0_a1HHQtwgKBlFRLuC0-AVyYhHpcTol560MvlUC',
},
],
'condition': 'checkout_android',
@ -1792,7 +1774,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'f6752b7ed1fe3cc1491c0c47ec5804ee2bd0e59b',
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '88742a54683bcdec9a0d0c14462621da8b6f841e',
'src/third_party/sqlite4java': {
'packages': [
@ -1834,20 +1816,20 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ca388405c617415aebe5669a52f51d36fdb5f255',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '60ec371de65a63d588bcfce7a99482847ad1312e',
'src/third_party/turbine': {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 'epnqx7Yf9QxgyDaU87KJ1fLQvdZ_Mho_JjfpjmIBYWYC',
'version': 'YQC-btuan_DTe9V9dv_e1LxgYSWeOoDfrd-VSqzIvHkC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@7fa3159d78e09b7761f1cc5558bcd014529b4630',
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3c1556cc73226c2895c1de9a925dc5fe623c8752',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@ -1860,7 +1842,7 @@ deps = {
# Wayland protocols that add functionality not available in the core protocol.
'src/third_party/wayland-protocols/src': {
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + '4624cfaaf563cd7be5e2e2087c8de6d3a48ea867',
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + '83866f19d3d61b28e94d71781646466b3a6623d8',
'condition': 'checkout_linux',
},
@ -1884,10 +1866,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '07ce6fa80556ad9ea530f68bd82ffcafede40728',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '6c8361e98f1daba65902f5e2fc1297893ac14b67',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'f6ab0b438e22ea30db7ad3fbf9f870b0d4506235',
Var('webrtc_git') + '/src.git' + '@' + 'd75b9e9ff07ee42841b4e416629c9fbd4b058905',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@ -1899,20 +1881,13 @@ deps = {
'condition': 'checkout_linux',
},
# A conformance-suite developed by canonical for the mir wayland server.
# Required to compile exo_wlcs on chromeos checkouts.
'src/third_party/wlcs/src': {
'url': Var('chromium_git') + '/external/github.com/MirServer/wlcs.git' + '@' + '2930ad4b5ca602446ad211b49fb1827303ce9f4b',
'condition': 'checkout_chromeos',
},
'src/third_party/xdg-utils': {
'url': Var('chromium_git') + '/chromium/deps/xdg-utils.git' + '@' + 'd80274d5869b17b8c9067a1022e4416ee7ed5e0d',
'condition': 'checkout_linux',
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '06b2705f1b3e1ba0f161dd2979e2901ce93014e3',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '659147817805d17c7be2d60bd7bbca7e780f9c82',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@ -1921,7 +1896,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'iQ7zKud-gha6r9hEdwqYWRdOpeAs6gFfDxnviDUt4FQC',
'version': '-G9gUusEGDPsbf_GULdyJo9DYyeNBuqD8gHfdxCvIbYC',
},
],
'dep_type': 'cipd',
@ -1931,7 +1906,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': 'we56UJIWxJJ2GkQ_ne0o3oGAr7FBJa5T5Jr1xguLn-gC',
'version': 'BZ0EL-KSkwCzJciJf9MbwmZAJPRhlKOp0LEYiTV6lWIC',
},
],
'dep_type': 'cipd',
@ -1942,7 +1917,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': '9Wfje1bt82IO9pJokAt9lboy59X_Pe-s0b4EpmH7RT4C',
'version': '0GVvuvDBNt6KJ7UzxBRUW5ShTWCliifyzaCkudNzmrkC',
},
],
'dep_type': 'cipd',
@ -1953,7 +1928,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'zihT2Lk2afg0XzIZozuGcZXWv7RJujaDEi_6q7QL4DgC',
'version': '8vKG1ZGA0f7asv5AHh_7yBxVD2h-I-yR2oY4TOjwo6kC',
},
],
'dep_type': 'cipd',
@ -1964,7 +1939,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': Var('chrome_git') + '/chrome/src-internal.git@7cb646119373a8c3044c0671f387d00a5309f487',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@67c8cac0a84ad86b64ecf3f4af23a928fb605313',
'condition': 'checkout_src_internal',
},
@ -1983,7 +1958,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'ZwywifbvYt1337HGe0M8wkopVNNF-F8iB56x76CHhPUC',
'version': 'WyNqAPOj-HR5fZBbkHIXf7YeyCvf0GpXuhdv6EqzNJsC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -1994,7 +1969,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': 'AJctvg8Rp2HdxwonQDlXAJotQAKs3UOuvLPDpQz0DBYC',
'version': 'hF_ZkOgJWb6Tl-9h6WAmpF4VcZggBH4rjoP_hBr2ddUC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -2005,7 +1980,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'jBqCvHK_nRzSlQ39oGemZF_hSmtPKGRom2wqsPvdJpMC',
'version': 'EXosTZG9iiyjnqmWKjS04Tf9dvSUjbHqqhGv1SQW0ycC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -2016,7 +1991,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': 'kYrwWa00iq2L4oUZsXLbRYMP_ffVg025R6d6W00qKrcC',
'version': 'zmInwk2DIsJlzZbF9Fw29hmN6rQTpzqIgzzMAgwl2PkC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -3255,28 +3230,6 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_squareup_okio_okio_jvm': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_squareup_okio_okio_jvm',
'version': 'version:2@3.0.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_squareup_wire_wire_runtime_jvm': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_squareup_wire_wire_runtime_jvm',
'version': 'version:2@4.4.3.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils': {
'packages': [
{
@ -3420,22 +3373,22 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/net_bytebuddy_byte_buddy_android': {
'src/third_party/android_deps/libs/net_ltgt_gradle_incap_incap': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_android',
'version': 'version:2@1.12.22.cr1',
'package': 'chromium/third_party/android_deps/libs/net_ltgt_gradle_incap_incap',
'version': 'version:2@0.2.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/net_ltgt_gradle_incap_incap': {
'src/third_party/android_deps/libs/net_sf_kxml_kxml2': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_ltgt_gradle_incap_incap',
'version': 'version:2@0.2.cr1',
'package': 'chromium/third_party/android_deps/libs/net_sf_kxml_kxml2',
'version': 'version:2@2.3.0.cr1',
},
],
'condition': 'checkout_android',
@ -3618,17 +3571,6 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_mockito_mockito_android': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_android',
'version': 'version:2@5.1.1.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_mockito_mockito_core': {
'packages': [
{
@ -3640,17 +3582,6 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_mockito_mockito_subclass': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_subclass',
'version': 'version:2@5.1.1.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_objenesis_objenesis': {
'packages': [
{
@ -3935,7 +3866,6 @@ include_rules = [
'+library_loaders',
'+testing',
'+third_party/google_benchmark/src/include/benchmark/benchmark.h',
'+third_party/icu/source/common/unicode',
'+third_party/icu/source/i18n/unicode',
'+url',
@ -4145,7 +4075,7 @@ hooks = [
# Update prebuilt Rust toolchain.
'name': 'rust-toolchain',
'pattern': '.',
'condition': 'checkout_rust',
'condition': 'fetch_prebuilt_chromium_rust_toolchain',
'action': ['python3', 'src/tools/rust/update_rust.py'],
},
{
@ -4730,7 +4660,7 @@ hooks = [
],
},
{
'name': 'Fetch ChromeOS AFDO profiles (from Intel Big cores)',
'name': 'Fetch Chrome OS AFDO profiles (from Intel big cores)',
'pattern': '.',
'condition': 'checkout_chromeos or checkout_simplechrome',
'action': [ 'python3',
@ -4753,18 +4683,6 @@ hooks = [
'--gs_url_base=chromeos-prebuilt/afdo-job/vetted/release',
],
},
{
'name': 'Fetch ChromeOS AFDO profiles (experimental Arm profile)',
'pattern': '.',
'condition': 'checkout_chromeos or checkout_simplechrome',
'action': [ 'python3',
'src/tools/download_optimization_profile.py',
'--newest_state=src/chromeos/profiles/arm-exp.afdo.newest.txt',
'--local_state=src/chromeos/profiles/arm-exp.afdo.local.txt',
'--output_name=src/chromeos/profiles/arm-exp.afdo.prof',
'--gs_url_base=chromeos-prebuilt/afdo-job/vetted/release',
],
},
{
# Pull doclava binaries if building for Android.
'name': 'doclava',
@ -5007,19 +4925,6 @@ hooks = [
'--gs-url-base=chromium-optimization-profiles/pgo_profiles',
],
},
{
'name': 'Fetch PGO profiles for V8 builtins',
'pattern': '.',
# Always download profiles on Android builds. The GN arg `is_official_build`
# is required to consider the profiles during build time.
'condition': 'checkout_pgo_profiles or checkout_android',
'action': [ 'python3',
'src/v8/tools/builtins-pgo/download_profiles.py',
'download',
'--depot-tools',
'src/third_party/depot_tools',
],
},
# Download Cast3p Binaries
{
@ -5065,23 +4970,6 @@ hooks = [
'--bucket', 'chromium-style-perftest',
'-d', 'src/third_party/blink/renderer/core/css/perftest_data'],
},
# Download remote exec cfg files
{
'name': 'fetch_reclient_cfgs',
'pattern': '.',
'condition': 'download_remoteexec_cfg',
'action': ['python3',
'src/buildtools/reclient_cfgs/fetch_reclient_cfgs.py',
'--rbe_instance',
Var('rbe_instance'),
'--reproxy_cfg_template',
'reproxy.cfg.template',
'--rewrapper_cfg_project',
Var('rewrapper_cfg_project'),
'--quiet',
'--hook',
],
},
]
# Add any corresponding DEPS files from this list to chromium.exclusions in

View File

@ -19,7 +19,6 @@
import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//base/debug/debug.gni")
import("//base/trace_event/tracing.gni")
import("//build/buildflag_header.gni")
import("//build/config/arm.gni")
@ -121,21 +120,12 @@ if (is_fuchsia) {
# TODO(crbug.com/1304707): Drop toolchain_has_rust after we have support for all
# our toolchains: Linux x86 is missing in order to build for Android.
#
# Rust to C++ type conversions.
build_rust_base_conversions = toolchain_has_rust && enable_rust_base_conversions
# The Rust implementation of base::JSONReader.
build_rust_json_reader = toolchain_has_rust && enable_rust_json
assert(build_rust_base_conversions || !build_rust_json_reader,
"Cannot enable Rust JSON decoder without also base conversions")
buildflag_header("rust_buildflags") {
header = "rust_buildflags.h"
flags = [
"BUILD_RUST_JSON_READER=$build_rust_json_reader",
"BUILD_RUST_BASE_CONVERSIONS=$build_rust_base_conversions",
]
buildflag_header("parsing_buildflags") {
header = "parsing_buildflags.h"
flags = [ "BUILD_RUST_JSON_READER=$build_rust_json_reader" ]
}
if (is_win) {
@ -254,6 +244,7 @@ component("base") {
"big_endian.h",
"bit_cast.h",
"bits.h",
"build_time.cc",
"build_time.h",
"callback_list.cc",
"callback_list.h",
@ -451,7 +442,6 @@ component("base") {
"memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h",
"memory/singleton.h",
"memory/stack_allocated.h",
"memory/unsafe_shared_memory_pool.cc",
"memory/unsafe_shared_memory_pool.h",
"memory/unsafe_shared_memory_region.cc",
@ -480,8 +470,6 @@ component("base") {
"metrics/dummy_histogram.h",
"metrics/field_trial.cc",
"metrics/field_trial.h",
"metrics/field_trial_list_including_low_anonymity.cc",
"metrics/field_trial_list_including_low_anonymity.h",
"metrics/field_trial_param_associator.cc",
"metrics/field_trial_param_associator.h",
"metrics/field_trial_params.cc",
@ -942,7 +930,6 @@ component("base") {
"types/pass_key.h",
"types/strong_alias.h",
"types/token_type.h",
"types/variant_util.h",
"unguessable_token.cc",
"unguessable_token.h",
"value_iterators.cc",
@ -1077,9 +1064,9 @@ component("base") {
":ios_cronet_buildflags",
":logging_buildflags",
":orderfile_buildflags",
":parsing_buildflags",
":power_monitor_buildflags",
":profiler_buildflags",
":rust_buildflags",
":sanitizer_buildflags",
":synchronization_buildflags",
":tracing_buildflags",
@ -1087,10 +1074,11 @@ component("base") {
"//base/numerics:base_numerics",
"//build:chromecast_buildflags",
"//build:chromeos_buildflags",
"//build/rust:rust_buildflags",
"//third_party/abseil-cpp:absl",
]
if (build_rust_base_conversions) {
if (toolchain_has_rust) {
# Base provides conversions between CXX types and base types (e.g.
# StringPiece).
public_deps += [ "//build/rust:cxx_cppdeps" ]
@ -1216,8 +1204,6 @@ component("base") {
"os_compat_android.cc",
"os_compat_android.h",
"process/process_android.cc",
"profiler/native_unwinder_android_map_delegate.h",
"profiler/native_unwinder_android_memory_regions_map.h",
"profiler/stack_sampler_android.cc",
"system/sys_info_android.cc",
"threading/platform_thread_android_stub.cc",
@ -1278,13 +1264,6 @@ component("base") {
libs += [ "android/library_loader/anchor_functions.lds" ]
} # is_android
if (build_allocation_stack_trace_recorder) {
sources += [
"debug/allocation_trace.cc",
"debug/allocation_trace.h",
]
}
if (is_robolectric) {
# Make jni.h available.
configs += [ "//third_party/jdk" ]
@ -1356,12 +1335,10 @@ component("base") {
"files/memory_mapped_file_posix.cc",
"fuchsia/default_job.cc",
"fuchsia/default_job.h",
"fuchsia/fidl_event_handler.h",
"fuchsia/file_utils.cc",
"fuchsia/file_utils.h",
"fuchsia/filtered_service_directory.cc",
"fuchsia/filtered_service_directory.h",
"fuchsia/fuchsia_component_connect.h",
"fuchsia/fuchsia_logging.cc",
"fuchsia/fuchsia_logging.h",
"fuchsia/intl_profile_watcher.cc",
@ -1430,16 +1407,13 @@ component("base") {
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.component.runner:fuchsia.component.runner_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.intl:fuchsia.intl_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mem:fuchsia.mem_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/async",
"//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/fdio",
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp_wire",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/syslog_structured_backend",
@ -1448,10 +1422,9 @@ component("base") {
]
deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp_hlcpp_conversion",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys:fuchsia.sys_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/async-default",
"//third_party/fuchsia-sdk/sdk/pkg/async-loop-cpp",
@ -1844,7 +1817,7 @@ component("base") {
"win/sphelper.h",
"win/startup_information.cc",
"win/startup_information.h",
"win/variant_conversions.h",
"win/variant_util.h",
"win/variant_vector.cc",
"win/variant_vector.h",
"win/vector.cc",
@ -1927,6 +1900,8 @@ component("base") {
"mac/authorization_util.h",
"mac/authorization_util.mm",
"mac/close_nocancel.cc",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
"mac/launch_application.h",
"mac/launch_application.mm",
"mac/launchd.cc",
@ -1991,6 +1966,7 @@ component("base") {
"CoreFoundation.framework",
"IOKit.framework",
"OpenDirectory.framework",
"Security.framework",
]
}
@ -2007,8 +1983,6 @@ component("base") {
"mac/call_with_eh_frame.cc",
"mac/call_with_eh_frame.h",
"mac/call_with_eh_frame_asm.S",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
"mac/foundation_util.h",
"mac/foundation_util.mm",
"mac/mac_logging.h",
@ -2037,7 +2011,6 @@ component("base") {
"threading/platform_thread_mac.mm",
"time/time_mac.mm",
]
frameworks += [ "Security.framework" ]
}
# Linux.
@ -2095,7 +2068,9 @@ component("base") {
"mac/bridging.h",
"native_library_ios.mm",
"power_monitor/power_monitor_device_source_ios.mm",
"process/launch_ios.cc",
"process/process_metrics_ios.cc",
"process/process_stubs.cc",
"profiler/stack_sampler_ios.cc",
"system/sys_info_ios.mm",
]
@ -2105,27 +2080,13 @@ component("base") {
"files/file_path_watcher_kqueue.cc",
"files/file_path_watcher_kqueue.h",
"files/file_path_watcher_mac.cc",
"ios/sim_header_shims.h",
"mac/mach_port_rendezvous.cc",
"mac/mach_port_rendezvous.h",
"process/launch_mac.cc",
"process/memory_mac.mm",
"process/port_provider_mac.cc",
"process/port_provider_mac.h",
"process/process_handle_mac.cc",
"process/process_iterator_ios.mm",
"process/process_mac.cc",
"process/process_posix.cc",
"sync_socket_posix.cc",
"synchronization/waitable_event_watcher_mac.cc",
]
libs += [ "bsm" ]
} else {
sources += [
"process/launch_ios.cc",
"process/memory_stubs.cc",
"process/process_stubs.cc",
]
sources += [ "process/memory_stubs.cc" ]
}
if (is_cronet_build) {
@ -2157,8 +2118,6 @@ component("base") {
]
} else {
sources -= [
"allocator/dispatcher/dispatcher.cc",
"allocator/dispatcher/dispatcher.h",
"sampling_heap_profiler/poisson_allocation_sampler.cc",
"sampling_heap_profiler/poisson_allocation_sampler.h",
"sampling_heap_profiler/sampling_heap_profiler.cc",
@ -2417,10 +2376,6 @@ buildflag_header("debugging_buildflags") {
enable_lldbinit_warning =
is_debug && strip_absolute_paths_from_debug_symbols && is_mac
# TODO(crbug.com/1420774): Try and enable these checks on Android too.
enable_commandline_sequence_checks =
(is_debug || dcheck_always_on) && !is_android
flags = [
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_LOCATION_SOURCE=$enable_location_source",
@ -2433,8 +2388,6 @@ buildflag_header("debugging_buildflags") {
"ENABLE_LLDBINIT_WARNING=$enable_lldbinit_warning",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"ENABLE_STACK_TRACE_LINE_NUMBERS=$enable_stack_trace_line_numbers",
"ENABLE_COMMANDLINE_SEQUENCE_CHECKS=$enable_commandline_sequence_checks",
"ENABLE_ALLOCATION_STACK_TRACE_RECORDER=$build_allocation_stack_trace_recorder",
]
}
@ -2589,7 +2542,7 @@ if (use_custom_libcxx && enable_safe_libcxx && !is_debug) {
}
action("build_date") {
script = "write_build_date_header.py"
script = "//build/write_build_date_header.py"
outputs = [ "$target_gen_dir/generated_build_date.h" ]

View File

@ -18,8 +18,6 @@ buildflag_header("buildflags") {
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support",
"USE_ALLOCATION_EVENT_DISPATCHER=$use_allocation_event_dispatcher",
]
}

View File

@ -12,11 +12,6 @@ if (is_ios) {
declare_args() {
# Causes all the allocations to be routed via allocator_shim.cc.
use_allocator_shim = use_allocator_shim_default
# Use the new allocation event dispatcher to distribute events to event observers.
# If set to false, PoissonAllocationSampler will hook into PartitionAllocator and
# AllocatorShims directly.
use_allocation_event_dispatcher = false
}
assert(
@ -34,12 +29,14 @@ if (is_win && use_allocator_shim) {
# Chromium-specific asserts. External embedders _may_ elect to use these
# features even without PA-E.
if (!use_partition_alloc_as_malloc) {
# In theory, BackupRefPtr will work just fine without
# PartitionAlloc-Everywhere, but its scope would be limited to partitions
# In theory, BackupRefPtr/MTECheckedPtr will work just fine without
# PartitionAlloc-Everywhere, but their scope would be limited to partitions
# that are invoked explicitly (not via malloc). These are only Blink
# partition, where we currently don't even use raw_ptr<T>.
assert(!enable_backup_ref_ptr_support,
"Chromium does not use BRP without PA-E")
assert(!enable_mte_checked_ptr_support,
"Chromium does not use MTECheckedPtr without PA-E")
# Pointer compression works only if all pointers are guaranteed to be
# allocated by PA (in one of its core pools, to be precise). In theory,

View File

@ -18,7 +18,6 @@
#include <atomic>
#endif
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {
@ -224,11 +223,9 @@ void PartitionFreeHook(void* address) {
} // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC)
#endif // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
namespace base::allocator::dispatcher {
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::InsertAllocatorDispatch(
@ -245,7 +242,10 @@ void InstallStandardAllocatorHooks() {
&partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC)
}
#endif // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
} // namespace base::allocator::dispatcher
namespace base::allocator::dispatcher {
// The private implementation of Dispatcher.
struct Dispatcher::Impl {

View File

@ -5,7 +5,6 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"
@ -13,9 +12,7 @@
namespace base::allocator::dispatcher {
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
void BASE_EXPORT InstallStandardAllocatorHooks();
#endif
namespace internal {
struct DispatchData;

View File

@ -274,7 +274,7 @@ struct DispatcherImpl {
static AllocatorDispatch allocator_dispatch_;
#endif
ALWAYS_INLINE static void DoNotifyAllocation(
static ALWAYS_INLINE void DoNotifyAllocation(
void* address,
size_t size,
AllocationSubsystem subSystem,
@ -283,7 +283,7 @@ struct DispatcherImpl {
subSystem, type_name);
}
ALWAYS_INLINE static void DoNotifyFree(void* address) {
static ALWAYS_INLINE void DoNotifyFree(void* address) {
PerformFreeNotification(s_observers, AllObservers{}, address);
}

View File

@ -32,7 +32,7 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
@ -105,7 +105,7 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) || \
BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT
#else
@ -125,7 +125,6 @@ const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam {
&kPartitionAllocBackupRefPtr, "enabled-processes",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
BackupRefPtrEnabledProcesses::kNonRenderer,
#else
@ -138,12 +137,8 @@ constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
{BackupRefPtrMode::kEnabledWithMemoryReclaimer,
"enabled-with-memory-reclaimer"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions2WayWithMemoryReclaimer,
"disabled-but-2-way-split-with-memory-reclaimer"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way,
"disabled-but-3-way-split"},
{BackupRefPtrMode::kDisabledButAddDummyRefCount,

View File

@ -90,17 +90,10 @@ enum class BackupRefPtrMode {
// Same as kEnabled but without zapping quarantined objects.
kEnabledWithoutZapping,
// Same as kEnabled but registers the main partition to memory reclaimer.
kEnabledWithMemoryReclaimer,
// BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode.
kDisabledButSplitPartitions2Way,
// Same as kDisabledButSplitPartitions2Way but registers the main partition to
// memory reclaimer.
kDisabledButSplitPartitions2WayWithMemoryReclaimer,
// BRP is disabled, but the main partition *and* aligned partition are split
// out, as if BRP was enabled in the "before allocation" mode.
kDisabledButSplitPartitions3Way,

View File

@ -336,22 +336,11 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
brp_group_name = "EnabledPrevSlotWithoutZapping";
#else
brp_group_name = "EnabledBeforeAllocWithoutZapping";
#endif
break;
case features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlotWithMemoryReclaimer";
#else
brp_group_name = "EnabledBeforeAllocWithMemoryReclaimer";
#endif
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
brp_group_name = "DisabledBut2WaySplit";
break;
case features::BackupRefPtrMode::
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
brp_group_name = "DisabledBut2WaySplitWithMemoryReclaimer";
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
brp_group_name = "DisabledBut3WaySplit";
break;
@ -631,19 +620,18 @@ void DanglingRawPtrReleased(uintptr_t id) {
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n"
<< dangling_signature << "\n\n"
<< "The memory was freed at:\n"
<< free_info->stack_trace << "\n"
<< free_info->task_trace << "\n"
<< free_info->stack_trace << free_info->task_trace << "\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n"
<< task_trace_release << dangling_ptr_footer;
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
} else {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n"
<< task_trace_release << dangling_ptr_footer;
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
}
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
@ -840,132 +828,10 @@ void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
PartitionAllocSupport::PartitionAllocSupport() = default;
void PartitionAllocSupport::ReconfigureForTests() {
ReconfigureEarlyish("");
base::AutoLock scoped_lock(lock_);
called_for_tests_ = true;
}
// static
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
bool enable_brp = false;
bool enable_brp_zapping = false;
bool split_main_partition = false;
bool use_dedicated_aligned_partition = false;
bool add_dummy_ref_count = false;
bool process_affected_by_brp_flag = false;
bool enable_memory_reclaimer = false;
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_affected_by_brp_flag = process_type.empty();
break;
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_affected_by_brp_flag =
process_type.empty() ||
(process_type == switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_affected_by_brp_flag =
(process_type != switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_affected_by_brp_flag = true;
break;
}
}
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (process_affected_by_brp_flag) {
switch (base::features::kBackupRefPtrModeParam.Get()) {
case base::features::BackupRefPtrMode::kDisabled:
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
break;
case base::features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
enable_memory_reclaimer = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabled:
enable_brp_zapping = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
enable_brp = true;
split_main_partition = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// AlignedAlloc relies on natural alignment offered by the allocator
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
// in front of the allocation will mess up that alignment. Such extras
// are used when BackupRefPtr is on, in which case, we need a separate
// partition, dedicated to handle only aligned allocations, where those
// extras are disabled. However, if the "previous slot" variant is used,
// no dedicated partition is needed, as the extras won't interfere with
// the alignment requirements.
use_dedicated_aligned_partition = true;
#endif
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
split_main_partition = true;
break;
case base::features::BackupRefPtrMode::
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
split_main_partition = true;
enable_memory_reclaimer = true;
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
split_main_partition = true;
use_dedicated_aligned_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
split_main_partition = true;
add_dummy_ref_count = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
use_dedicated_aligned_partition = true;
#endif
break;
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return {enable_brp,
enable_brp_zapping,
enable_memory_reclaimer,
split_main_partition,
use_dedicated_aligned_partition,
add_dummy_ref_count,
process_affected_by_brp_flag};
}
void PartitionAllocSupport::ReconfigureEarlyish(
const std::string& process_type) {
{
base::AutoLock scoped_lock(lock_);
// In tests, ReconfigureEarlyish() is called by ReconfigureForTest(), which
// is earlier than ContentMain().
if (called_for_tests_) {
DCHECK(called_earlyish_);
return;
}
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(!called_earlyish_)
<< "ReconfigureEarlyish was already called for process '"
@ -1016,11 +882,8 @@ void PartitionAllocSupport::ReconfigureAfterZygoteFork(
}
void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
const std::string& process_type,
bool configure_dangling_pointer_detector) {
if (configure_dangling_pointer_detector) {
const std::string& process_type) {
base::allocator::InstallDanglingRawPtrChecks();
}
base::allocator::InstallUnretainedDanglingRawPtrChecks();
{
base::AutoLock scoped_lock(lock_);
@ -1051,11 +914,46 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
}
DCHECK_NE(process_type, switches::kZygoteProcess);
[[maybe_unused]] BrpConfiguration brp_config =
GetBrpConfiguration(process_type);
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
[[maybe_unused]] bool enable_brp = false;
[[maybe_unused]] bool enable_brp_zapping = false;
[[maybe_unused]] bool split_main_partition = false;
[[maybe_unused]] bool use_dedicated_aligned_partition = false;
[[maybe_unused]] bool add_dummy_ref_count = false;
[[maybe_unused]] bool process_affected_by_brp_flag = false;
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_affected_by_brp_flag = process_type.empty();
break;
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_affected_by_brp_flag =
process_type.empty() ||
(process_type == switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_affected_by_brp_flag =
(process_type != switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_affected_by_brp_flag = true;
break;
}
}
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (brp_config.process_affected_by_brp_flag) {
if (process_affected_by_brp_flag) {
base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck(
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
@ -1071,16 +969,62 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
}
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (process_affected_by_brp_flag) {
switch (base::features::kBackupRefPtrModeParam.Get()) {
case base::features::BackupRefPtrMode::kDisabled:
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
break;
case base::features::BackupRefPtrMode::kEnabled:
enable_brp_zapping = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
enable_brp = true;
split_main_partition = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// AlignedAlloc relies on natural alignment offered by the allocator
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
// in front of the allocation will mess up that alignment. Such extras
// are used when BackupRefPtr is on, in which case, we need a separate
// partition, dedicated to handle only aligned allocations, where those
// extras are disabled. However, if the "previous slot" variant is used,
// no dedicated partition is needed, as the extras won't interfere with
// the alignment requirements.
use_dedicated_aligned_partition = true;
#endif
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
split_main_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
split_main_partition = true;
use_dedicated_aligned_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
split_main_partition = true;
add_dummy_ref_count = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
use_dedicated_aligned_partition = true;
#endif
break;
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
allocator_shim::ConfigurePartitions(
allocator_shim::EnableBrp(brp_config.enable_brp),
allocator_shim::EnableBrpZapping(brp_config.enable_brp_zapping),
allocator_shim::EnableBrpPartitionMemoryReclaimer(
brp_config.enable_brp_partition_memory_reclaimer),
allocator_shim::SplitMainPartition(brp_config.split_main_partition),
allocator_shim::EnableBrp(enable_brp),
allocator_shim::EnableBrpZapping(enable_brp_zapping),
allocator_shim::SplitMainPartition(split_main_partition),
allocator_shim::UseDedicatedAlignedPartition(
brp_config.use_dedicated_aligned_partition),
allocator_shim::AddDummyRefCount(brp_config.add_dummy_ref_count),
use_dedicated_aligned_partition),
allocator_shim::AddDummyRefCount(add_dummy_ref_count),
allocator_shim::AlternateBucketDistribution(
base::features::kPartitionAllocAlternateBucketDistributionParam
.Get()));
@ -1089,7 +1033,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false;
#if BUILDFLAG(USE_STARSCAN)
if (!brp_config.enable_brp) {
if (!enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process.
if (process_type.empty()) {

View File

@ -43,15 +43,6 @@ BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
// Allows to re-configure PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport {
public:
struct BrpConfiguration {
bool enable_brp = false;
bool enable_brp_zapping = false;
bool enable_brp_partition_memory_reclaimer = false;
bool split_main_partition = false;
bool use_dedicated_aligned_partition = false;
bool add_dummy_ref_count = false;
bool process_affected_by_brp_flag = false;
};
// Reconfigure* functions re-configure PartitionAlloc. It is impossible to
// configure PartitionAlloc before/at its initialization using information not
// known at compile-time (e.g. process type, Finch), because by the time this
@ -75,12 +66,9 @@ class BASE_EXPORT PartitionAllocSupport {
// re-configuration steps exactly once.
//
// *AfterTaskRunnerInit() may be called more than once.
void ReconfigureForTests();
void ReconfigureEarlyish(const std::string& process_type);
void ReconfigureAfterZygoteFork(const std::string& process_type);
void ReconfigureAfterFeatureListInit(
const std::string& process_type,
bool configure_dangling_pointer_detector = true);
void ReconfigureAfterFeatureListInit(const std::string& process_type);
void ReconfigureAfterTaskRunnerInit(const std::string& process_type);
// |has_main_frame| tells us if the renderer contains a main frame.
@ -97,13 +85,10 @@ class BASE_EXPORT PartitionAllocSupport {
return singleton;
}
static BrpConfiguration GetBrpConfiguration(const std::string& process_type);
private:
PartitionAllocSupport();
base::Lock lock_;
bool called_for_tests_ GUARDED_BY(lock_) = false;
bool called_earlyish_ GUARDED_BY(lock_) = false;
bool called_after_zygote_fork_ GUARDED_BY(lock_) = false;
bool called_after_feature_list_init_ GUARDED_BY(lock_) = false;

View File

@ -92,7 +92,6 @@ component("partition_alloc") {
"partition_alloc_base/cpu.cc",
"partition_alloc_base/cpu.h",
"partition_alloc_base/cxx17_backports.h",
"partition_alloc_base/cxx20_is_constant_evaluated.h",
"partition_alloc_base/debug/alias.cc",
"partition_alloc_base/debug/alias.h",
"partition_alloc_base/gtest_prod_util.h",
@ -103,6 +102,7 @@ component("partition_alloc") {
"partition_alloc_base/memory/ref_counted.h",
"partition_alloc_base/memory/scoped_policy.h",
"partition_alloc_base/memory/scoped_refptr.h",
"partition_alloc_base/migration_adapter.h",
"partition_alloc_base/no_destructor.h",
"partition_alloc_base/numerics/checked_math.h",
"partition_alloc_base/numerics/checked_math_impl.h",
@ -160,6 +160,9 @@ component("partition_alloc") {
"partition_root.h",
"partition_stats.cc",
"partition_stats.h",
"partition_tag.h",
"partition_tag_bitmap.h",
"partition_tag_types.h",
"partition_tls.h",
"pkey.cc",
"pkey.h",
@ -426,11 +429,9 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
@ -440,6 +441,10 @@ buildflag_header("partition_alloc_buildflags") {
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
# Not to be used directly - instead use
# PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
"ENABLE_MTE_CHECKED_PTR_SUPPORT=$enable_mte_checked_ptr_support",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$glue_core_pools",

View File

@ -139,9 +139,6 @@ include_rules = [
specific_include_rules = {
".*_(perf|unit)test\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h",
"+base/test/gtest_util.h",

View File

@ -62,18 +62,16 @@ void AddressPoolManager::GetPoolUsedSuperPages(
pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used) {
Pool* pool = GetPool(handle);
if (!pool) {
if (!pool)
return;
}
pool->GetUsedSuperPages(used);
}
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
Pool* pool = GetPool(handle);
if (!pool) {
if (!pool)
return 0;
}
return pool->GetBaseAddress();
}
@ -94,13 +92,11 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
Pool* pool = GetPool(handle);
if (!requested_address) {
if (!requested_address)
return pool->FindChunk(length);
}
const bool is_available = pool->TryReserveChunk(requested_address, length);
if (is_available) {
if (is_available)
return requested_address;
}
return pool->FindChunk(length);
}
@ -167,9 +163,8 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
// |total_bits_|, return |nullptr| to signal no free chunk was found.
size_t end_bit = beg_bit + need_bits;
if (end_bit > total_bits_) {
if (end_bit > total_bits_)
return 0;
}
bool found = true;
for (; curr_bit < end_bit; ++curr_bit) {
@ -181,11 +176,10 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
// next outer loop pass from checking the same bits.
beg_bit = curr_bit + 1;
found = false;
if (bit_hint_ == curr_bit) {
if (bit_hint_ == curr_bit)
++bit_hint_;
}
}
}
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
// mark as allocated) and return the allocated address.
@ -218,15 +212,13 @@ bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
const size_t need_bits = requested_size / kSuperPageSize;
const size_t end_bit = begin_bit + need_bits;
// Check that requested address is not too high.
if (end_bit > total_bits_) {
if (end_bit > total_bits_)
return false;
}
// Check if any bit of the requested region is set already.
for (size_t i = begin_bit; i < end_bit; ++i) {
if (alloc_bitset_.test(i)) {
if (alloc_bitset_.test(i))
return false;
}
}
// Otherwise, set the bits.
for (size_t i = begin_bit; i < end_bit; ++i) {
alloc_bitset_.set(i);
@ -528,10 +520,9 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get blocklist size.
for (const auto& blocked :
AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
if (blocked.load(std::memory_order_relaxed)) {
if (blocked.load(std::memory_order_relaxed))
stats->blocklist_size += 1;
}
}
// Count failures in finding non-blocklisted addresses.
stats->blocklist_hit_count =

View File

@ -33,12 +33,10 @@ uintptr_t GetRandomPageBase() {
// randomization isn't buying anything. In that case we just skip it.
// TODO(palmer): Just dump the randomization when HE-ASLR is present.
static BOOL is_wow64 = -1;
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE;
}
if (!is_wow64) {
if (!is_wow64)
return 0;
}
#endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask();
random += internal::ASLROffset();

View File

@ -20,11 +20,11 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
namespace internal {
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) {
return mask & PageAllocationGranularityBaseMask();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL);
}
@ -45,11 +45,11 @@ AslrMask(uintptr_t bits) {
// hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See
// https://crbug.com/539863.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrAddress(0x007fffffffffULL);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x7e8000000000ULL);
}
@ -59,11 +59,11 @@ AslrMask(uintptr_t bits) {
// Windows 8.10 and newer support the full 48 bit address range. Since
// ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
// http://www.alex-ionescu.com/?p=246
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(47);
}
// Try not to map pages into the range where Windows loads DLLs by default.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return 0x80000000ULL;
}
@ -82,11 +82,11 @@ AslrMask(uintptr_t bits) {
//
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
// Be careful, there is a zone where macOS will not map memory, at least
// on ARM64. From an ARM64 machine running 12.3, the range seems to be
@ -104,10 +104,10 @@ AslrMask(uintptr_t bits) {
// Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
@ -117,10 +117,10 @@ AslrMask(uintptr_t bits) {
// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
@ -130,11 +130,11 @@ AslrMask(uintptr_t bits) {
// page size and number of levels of translation pages used. We use
// 39-bit as base as all setups should support this, lowered to 38-bit
// as ASLROffset() could cause a carry.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() {
return AslrMask(38);
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
@ -143,10 +143,10 @@ AslrMask(uintptr_t bits) {
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(38);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
@ -159,30 +159,30 @@ AslrMask(uintptr_t bits) {
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x400000000000ULL);
}
#elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(42);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
@ -193,10 +193,10 @@ AslrMask(uintptr_t bits) {
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(40);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
@ -204,10 +204,10 @@ AslrMask(uintptr_t bits) {
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(29);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0);
}
@ -215,7 +215,7 @@ AslrMask(uintptr_t bits) {
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
@ -231,7 +231,7 @@ AslrMask(uintptr_t bits) {
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x80000000ULL);
}
@ -239,7 +239,7 @@ AslrMask(uintptr_t bits) {
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x90000000ULL);
}
@ -248,7 +248,7 @@ AslrMask(uintptr_t bits) {
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
@ -264,10 +264,10 @@ AslrMask(uintptr_t bits) {
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30);
}
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}

View File

@ -15,9 +15,8 @@ thread_local bool g_disallow_allocations;
} // namespace
ScopedDisallowAllocations::ScopedDisallowAllocations() {
if (g_disallow_allocations) {
if (g_disallow_allocations)
PA_IMMEDIATE_CRASH();
}
g_disallow_allocations = true;
}

View File

@ -13,6 +13,7 @@ import("//build_overrides/build.gni")
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false

View File

@ -227,7 +227,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
internal::CompressedPointerBaseGlobal::kBitsToShift +
kBitsForSignExtension;
PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) {
static PA_ALWAYS_INLINE UnderlyingType Compress(T* ptr) {
static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
@ -252,14 +252,13 @@ class PA_TRIVIAL_ABI CompressedPointer final {
// frequent operation, we let more work here in favor of faster
// decompression.
// TODO(1376980): Avoid this by overreserving the heap.
if (compressed) {
if (compressed)
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
}
return compressed;
}
PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) {
static PA_ALWAYS_INLINE T* Decompress(UnderlyingType ptr) {
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will
@ -461,13 +460,13 @@ class PA_TRIVIAL_ABI UncompressedPointer final {
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
PA_ALWAYS_INLINE constexpr UncompressedPointer(
const UncompressedPointer<U>& other)
: ptr_(other.ptr_) {}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
PA_ALWAYS_INLINE constexpr UncompressedPointer(
UncompressedPointer<U>&& other) noexcept
: ptr_(std::move(other.ptr_)) {}

View File

@ -18,9 +18,8 @@ namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been
// disabled.
if (!root || !root->flags.with_thread_cache) {
if (!root || !root->flags.with_thread_cache)
return;
}
ThreadCacheRegistry::Instance().PurgeAll();
root->flags.with_thread_cache = false;
@ -31,9 +30,8 @@ void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) {
if (!root) {
if (!root)
return;
}
root->flags.with_thread_cache = true;
}
@ -44,9 +42,8 @@ void DisablePartitionAllocThreadCacheForProcess() {
auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator) {
if (aligned_allocator != regular_allocator)
DisableThreadCacheForRootIfEnabled(aligned_allocator);
}
DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
}
@ -56,79 +53,45 @@ void DisablePartitionAllocThreadCacheForProcess() {
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
DisablePartitionAllocThreadCacheForProcess();
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
ThreadCache::SwapForTesting(root);
EnablePartitionAllocThreadCacheForRootIfDisabled(root);
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
ThreadCache::SwapForTesting(regular_allocator);
#else
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}
ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCache* thread_cache = ThreadCache::Get();
if (ThreadCache::IsValid(thread_cache)) {
if (ThreadCache::IsValid(thread_cache))
return thread_cache->thread_alloc_stats();
}
return {};
}
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
ThreadSafePartitionRoot* root)
: root_(root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
regular_was_enabled_ =
regular_allocator && regular_allocator->flags.with_thread_cache;
if (root_ != regular_allocator) {
// Another |root| is ThreadCache's PartitionRoot. Need to disable
// thread cache for the process.
DisablePartitionAllocThreadCacheForProcess();
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
// Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_);
} else {
if (!regular_was_enabled_) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
}
}
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(ThreadCache::Get());
}
ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
bool regular_enabled =
regular_allocator && regular_allocator->flags.with_thread_cache;
if (regular_was_enabled_) {
if (!regular_enabled) {
// Need to re-enable ThreadCache for the process.
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
// In the case, |regular_allocator| must be ThreadCache's root.
ThreadCache::SwapForTesting(regular_allocator);
} else {
// ThreadCache is enabled for the process, but we need to be
// careful about ThreadCache's PartitionRoot. If it is different from
// |regular_allocator|, we need to invoke SwapForTesting().
if (regular_allocator != root_) {
ThreadCache::SwapForTesting(regular_allocator);
}
}
} else {
// ThreadCache for all processes was disabled.
DisableThreadCacheForRootIfEnabled(regular_allocator);
ThreadCache::SwapForTesting(nullptr);
}
#else
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root_);
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
} // namespace partition_alloc::internal

View File

@ -5,38 +5,25 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread();
// Creates a scope for testing which:
// - if the given |root| is a default malloc root for the entire process,
// enables the thread cache for the entire process.
// (This may happen if UsePartitionAllocAsMalloc is enabled.)
// - otherwise, disables the thread cache for the entire process, and
// replaces it with a thread cache for |root|.
// This class is unsafe to run if there are multiple threads running
// in the process.
class ThreadCacheProcessScopeForTesting {
public:
explicit ThreadCacheProcessScopeForTesting(ThreadSafePartitionRoot* root);
~ThreadCacheProcessScopeForTesting();
ThreadCacheProcessScopeForTesting() = delete;
private:
ThreadSafePartitionRoot* root_ = nullptr;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool regular_was_enabled_ = false;
#endif
};
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

View File

@ -12,6 +12,7 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
namespace partition_alloc::internal {
@ -26,7 +27,7 @@ constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
constexpr size_t kFreeSlotBitmapSize =
(kSuperPageSize / kSmallestBucket) / CHAR_BIT;
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
@ -35,7 +36,7 @@ ReservedFreeSlotBitmapSize() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
@ -44,7 +45,7 @@ CommittedFreeSlotBitmapSize() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerFreeSlotBitmap() {
return ReservedFreeSlotBitmapSize() / PartitionPageSize();
}
@ -52,7 +53,8 @@ NumPartitionPagesPerFreeSlotBitmap() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize();
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
}
#endif

View File

@ -65,9 +65,8 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
root.get(), super_page_count, 0);
if (!super_page_span_start) {
if (!super_page_span_start)
return nullptr;
}
#if defined(ARCH_CPU_64_BITS)
// Mapping the GWP-ASan region in to the lower 32-bits of address space

View File

@ -70,7 +70,7 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemory(size_t size) {
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
RunPartitionAllocOomCallback();
TerminateBecauseOutOfMemory(size);
PA_IMMEDIATE_CRASH();

View File

@ -49,8 +49,8 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT(
PARTITION_ALLOC) void OnNoMemory(size_t size);
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
OnNoMemory(size_t size);
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.

View File

@ -19,10 +19,9 @@ void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
namespace internal {
void RunPartitionAllocOomCallback() {
if (g_oom_callback) {
if (g_oom_callback)
g_oom_callback();
}
}
} // namespace internal
} // namespace partition_alloc

View File

@ -113,11 +113,10 @@ uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t actual_offset = address & (alignment - 1);
uintptr_t new_address;
if (actual_offset <= requested_offset) {
if (actual_offset <= requested_offset)
new_address = address + requested_offset - actual_offset;
} else {
else
new_address = address + alignment + requested_offset - actual_offset;
}
PA_DCHECK(new_address >= address);
PA_DCHECK(new_address - address < alignment);
PA_DCHECK(new_address % alignment == requested_offset);
@ -136,9 +135,8 @@ uintptr_t SystemAllocPages(uintptr_t hint,
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret = internal::SystemAllocPagesInternal(
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
if (ret) {
if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
}
return ret;
}
@ -212,17 +210,15 @@ uintptr_t AllocPagesWithAlignOffset(
file_descriptor_for_shared_alloc);
if (ret) {
// If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset) {
if ((ret & align_offset_mask) == align_offset)
return ret;
}
// Free the memory and try again.
FreePages(ret, length);
} else {
// |ret| is null; if this try was unhinted, we're OOM.
if (internal::kHintIsAdvisory || !address) {
if (internal::kHintIsAdvisory || !address)
return 0;
}
}
#if defined(ARCH_CPU_32_BITS)
// For small address spaces, try the first aligned address >= |ret|. Note
@ -372,9 +368,8 @@ bool ReserveAddressSpace(size_t size) {
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address) {
if (!s_reservation_address)
return false;
}
FreePages(s_reservation_address, s_reservation_size);
s_reservation_address = 0;

View File

@ -34,12 +34,12 @@ struct PageAccessibilityConfiguration {
};
#if BUILDFLAG(ENABLE_PKEYS)
constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {}
#else
constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS)
@ -300,7 +300,7 @@ void DiscardSystemPages(void* address, size_t length);
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) {
return (address + internal::SystemPageOffsetMask()) &
internal::SystemPageBaseMask();
@ -308,14 +308,14 @@ RoundUpToSystemPage(uintptr_t address) {
// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) {
return address & internal::SystemPageBaseMask();
}
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
return (address + internal::PageAllocationGranularityOffsetMask()) &
internal::PageAllocationGranularityBaseMask();
@ -323,7 +323,7 @@ RoundUpToPageAllocationGranularity(uintptr_t address) {
// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
return address & internal::PageAllocationGranularityBaseMask();
}

View File

@ -7,8 +7,8 @@
#include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
@ -66,10 +66,10 @@ extern PageCharacteristics page_characteristics;
namespace partition_alloc::internal {
// Forward declaration, implementation below
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity();
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
@ -96,7 +96,7 @@ PageAllocationGranularityShift() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
// This is literally equivalent to |1 << PageAllocationGranularityShift()|
@ -116,17 +116,17 @@ PageAllocationGranularity() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() {
return PageAllocationGranularity() - 1;
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() {
return ~PageAllocationGranularityOffsetMask();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageShift() {
// On Windows allocation granularity is higher than the page size. This comes
// into play when reserving address space range (allocation granularity),
@ -138,7 +138,7 @@ SystemPageShift() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageSize() {
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
@ -151,12 +151,12 @@ SystemPageSize() {
#endif
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageOffsetMask() {
return SystemPageSize() - 1;
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageBaseMask() {
return ~SystemPageOffsetMask();
}

View File

@ -111,7 +111,8 @@ uintptr_t SystemAllocPagesInternal(
}
uint64_t address;
status = zx::vmar::root_self()->map(options, vmar_offset, vmo,
status =
zx::vmar::root_self()->map(options, vmar_offset, vmo,
/*vmo_offset=*/0, length, &address);
if (status != ZX_OK) {
// map() is expected to fail if |hint| is set to an already-in-use location.

View File

@ -130,9 +130,8 @@ bool UseMapJit() {
base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement) {
if (!jit_entitlement)
return false;
}
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
@ -249,9 +248,8 @@ void SetSystemPagesAccessInternal(
//
// In this case, we are almost certainly bumping into the sandbox limit, mark
// the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) {
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
OOM_CRASH(length);
}
PA_PCHECK(0 == ret);
}
@ -367,10 +365,9 @@ bool TryRecommitSystemPagesInternal(
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
bool ok = TrySetSystemPagesAccess(address, length, accessibility);
if (!ok) {
if (!ok)
return false;
}
}
#if BUILDFLAG(IS_APPLE)
// On macOS, to update accounting, we need to make another syscall. For more

View File

@ -72,9 +72,8 @@ void* VirtualAllocWithRetry(void* address,
// Only retry for commit failures. If this is an address space problem
// (e.g. caller asked for an address which is not available), this is
// unlikely to be resolved by waiting.
if (ret || !should_retry || !IsOutOfMemory(GetLastError())) {
if (ret || !should_retry || !IsOutOfMemory(GetLastError()))
break;
}
Sleep(kDelayMs);
}
@ -143,9 +142,8 @@ bool TrySetSystemPagesAccessInternal(
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible) {
PageAccessibilityConfiguration::kInaccessible)
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
}
// Call the retry path even though this function can fail, because callers of
// this are likely to crash the process when this function fails, and we don't
// want that for transient failures.
@ -169,9 +167,8 @@ void SetSystemPagesAccessInternal(
if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
GetAccessFlags(accessibility))) {
int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT) {
if (error == ERROR_COMMITMENT_LIMIT)
OOM_CRASH(length);
}
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
PA_CHECK(ERROR_SUCCESS == error);

View File

@ -37,16 +37,16 @@ namespace internal {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
public:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_;
}
#else
PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
return kRegularPoolBaseMask;
}
#endif
PA_ALWAYS_INLINE static std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@ -76,10 +76,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
}
return std::make_pair(pool, address - base);
}
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
return kConfigurablePoolMaxSize;
}
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
return kConfigurablePoolMinSize;
}
@ -100,7 +100,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static void UninitForTesting();
static void UninitConfigurablePoolForTesting();
PA_ALWAYS_INLINE static bool IsInitialized() {
static PA_ALWAYS_INLINE bool IsInitialized() {
// Either neither or both regular and BRP pool are initialized. The
// configurable and pkey pool are initialized separately.
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
@ -112,19 +112,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return false;
}
PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
return setup_.configurable_pool_base_address_ !=
kUninitializedPoolBaseAddress;
}
#if BUILDFLAG(ENABLE_PKEYS)
PA_ALWAYS_INLINE static bool IsPkeyPoolInitialized() {
static PA_ALWAYS_INLINE bool IsPkeyPoolInitialized() {
return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
}
#endif
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
@ -134,12 +134,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
setup_.regular_pool_base_address_;
}
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
return setup_.regular_pool_base_address_;
}
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if PA_CONFIG(GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsInCorePools(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else
@ -166,40 +166,40 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return ret;
}
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t CorePoolsSize() {
static PA_ALWAYS_INLINE size_t CorePoolsSize() {
return RegularPoolSize() * 2;
}
#else
PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
static PA_ALWAYS_INLINE constexpr size_t CorePoolsSize() {
return RegularPoolSize() * 2;
}
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#endif // PA_CONFIG(GLUE_CORE_POOLS)
PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
}
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
return (address & setup_.configurable_pool_base_mask_) ==
setup_.configurable_pool_base_address_;
}
PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
return setup_.configurable_pool_base_address_;
}
#if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInPkeyPool(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsInPkeyPool(uintptr_t address) {
return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
}
#endif
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
if (pool == kRegularPoolHandle) {
return regular_pool_shadow_offset_;
} else if (pool == kBRPPoolHandle) {
@ -222,20 +222,20 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
private:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t RegularPoolSize();
PA_ALWAYS_INLINE static size_t BRPPoolSize();
static PA_ALWAYS_INLINE size_t RegularPoolSize();
static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else
// The pool sizes should be as large as maximum whenever possible.
PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
return kRegularPoolSize;
}
PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
return kBRPPoolSize;
}
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS)
PA_ALWAYS_INLINE static constexpr size_t PkeyPoolSize() {
constexpr static PA_ALWAYS_INLINE size_t PkeyPoolSize() {
return kPkeyPoolSize;
}
#endif
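The IsIn*Pool() predicates above all reduce to one mask-and-compare against an aligned pool base. A small illustrative sketch under assumed values (a hypothetical 16 GiB pool; none of these constants come from the real configuration):

#include <cstdint>

// Pool aligned to its own power-of-two size, so membership is one AND+compare.
constexpr uint64_t kExamplePoolSize = 16ull << 30;            // 16 GiB = 2^34
constexpr uint64_t kExamplePoolBaseMask = ~(kExamplePoolSize - 1);
constexpr uint64_t kExamplePoolBase = 0x700000000000ull;      // 2^34-aligned

constexpr bool IsInExamplePool(uint64_t address) {
  // Same shape as IsInConfigurablePool() above: mask off the offset bits and
  // compare against the pool base.
  return (address & kExamplePoolBaseMask) == kExamplePoolBase;
}

static_assert(IsInExamplePool(kExamplePoolBase + 42));
static_assert(!IsInExamplePool(kExamplePoolBase + kExamplePoolSize));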

View File

@ -68,15 +68,12 @@ declare_args() {
# recommended to enable PA-E above, but isn't strictly necessary. Embedders
# can create and use PA partitions explicitly.
enable_pointer_compression_support = false
# Enables a bounds check when two pointers (at least one being raw_ptr) are
# subtracted (if supported by the underlying implementation).
enable_pointer_subtraction_check = false
}
declare_args() {
# Build support for Use-after-Free protection via BackupRefPtr (BRP),
# switching the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
# Build support for Use-after-Free protection via BackupRefPtr (BRP) or
# MTECheckedPtr, and switch the raw_ptr<T> implementation to RawPtrBackupRefImpl
# and MTECheckedPtrImp, respectively. They're mutually exclusive.
#
# These are effective only for memory allocated from PartitionAlloc, so it is
# recommended to enable PA-E above, but isn't strictly necessary. Embedders
@ -87,12 +84,17 @@ declare_args() {
# partition_alloc::PartitionOptions::BackupRefPtr::kEnabled.
enable_backup_ref_ptr_support =
use_partition_alloc && enable_backup_ref_ptr_support_default
enable_mte_checked_ptr_support =
use_partition_alloc && enable_mte_checked_ptr_support_default
# RAW_PTR_EXCLUSION macro is disabled on official builds because it increased
# binary size. This flag can be used to enable it for official builds too.
force_enable_raw_ptr_exclusion = false
}
assert(!(enable_backup_ref_ptr_support && enable_mte_checked_ptr_support),
"MTECheckedPtrSupport conflicts with BRPSupport.")
assert(!enable_pointer_compression_support || glue_core_pools,
"Pointer compression relies on core pools being contiguous.")
@ -131,24 +133,16 @@ declare_args() {
enable_backup_ref_ptr_slow_checks =
enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support
# Enable the feature flag required to activate backup ref pointers. That is to
# say `PartitionAllocBackupRefPtr`.
#
# This is meant to be used primarily on bots. It is much easier to override
# the feature flags using a binary flag instead of updating multiple bots'
# scripts to pass command line arguments.
enable_backup_ref_ptr_feature_flag = false
enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enable the feature flag required to check for dangling pointers. That is to
# say `PartitionAllocDanglingPtr`.
# Enable the feature flags required to check for dangling pointers. That is to
# say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
#
# This is meant to be used primarily on bots. It is much easier to override
# the feature flags using a binary flag instead of updating multiple bots'
# This is meant to be used on bots only. It is much easier to override the
# feature flags using a binary flag instead of updating multiple bots'
# scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flag = false
enable_dangling_raw_ptr_feature_flags_for_bots = false
# Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointers have been fixed or annotated yet. To avoid
@ -160,7 +154,6 @@ declare_args() {
# to go through build_overrides
enable_dangling_raw_ptr_perf_experiment = false
# Set to `enable_backup_ref_ptr_support && has_64_bit_pointers` when enabling.
backup_ref_ptr_poison_oob_ptr = false
}
@ -194,6 +187,7 @@ if (is_nacl) {
if (!use_partition_alloc) {
use_partition_alloc_as_malloc = false
enable_backup_ref_ptr_support = false
enable_mte_checked_ptr_support = false
use_asan_backup_ref_ptr = false
use_asan_unowned_ptr = false
use_hookable_raw_ptr = false
@ -201,7 +195,6 @@ if (!use_partition_alloc) {
enable_backup_ref_ptr_slow_checks = false
enable_dangling_raw_ptr_checks = false
enable_dangling_raw_ptr_perf_experiment = false
enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false
use_starscan = false
}
@ -234,8 +227,6 @@ assert(
assert(
enable_backup_ref_ptr_support || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if BackupRefPtr isn't enabled at all")
assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if pointers are only 32-bit")
# AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr.
assert(

View File

@ -13,6 +13,7 @@
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"

View File

@ -7,6 +7,19 @@
#include "build/build_config.h"
// This is a wrapper around `__has_cpp_attribute`, which can be used to test for
// the presence of an attribute. In case the compiler does not support this
// macro it will simply evaluate to 0.
//
// References:
// https://wg21.link/sd6#testing-for-the-presence-of-an-attribute-__has_cpp_attribute
// https://wg21.link/cpp.cond#:__has_cpp_attribute
#if defined(__has_cpp_attribute)
#define PA_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#define PA_HAS_CPP_ATTRIBUTE(x) 0
#endif
// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
#if defined(__has_attribute)
#define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
@ -24,9 +37,7 @@
// Annotate a function indicating it should not be inlined.
// Use like:
// NOINLINE void DoStuff() { ... }
#if defined(__clang__) && PA_HAS_ATTRIBUTE(noinline)
#define PA_NOINLINE [[clang::noinline]]
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(noinline)
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_NOINLINE __attribute__((noinline))
#elif defined(COMPILER_MSVC)
#define PA_NOINLINE __declspec(noinline)
@ -34,10 +45,7 @@
#define PA_NOINLINE
#endif
#if defined(__clang__) && defined(NDEBUG) && PA_HAS_ATTRIBUTE(always_inline)
#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
#elif defined(COMPILER_GCC) && defined(NDEBUG) && \
PA_HAS_ATTRIBUTE(always_inline)
#if defined(COMPILER_GCC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
#elif defined(COMPILER_MSVC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE __forceinline
@ -54,42 +62,72 @@
// Use like:
// void NOT_TAIL_CALLED FooBar();
#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
#define PA_NOT_TAIL_CALLED [[clang::not_tail_called]]
#define PA_NOT_TAIL_CALLED __attribute__((not_tail_called))
#else
#define PA_NOT_TAIL_CALLED
#endif
// Specify memory alignment for structs, classes, etc.
// Use like:
// class PA_ALIGNAS(16) MyClass { ... }
// PA_ALIGNAS(16) int array[4];
// class ALIGNAS(16) MyClass { ... }
// ALIGNAS(16) int array[4];
//
// In most places you can use the C++11 keyword "alignas", which is preferred.
//
// Historically, compilers had trouble mixing __attribute__((...)) syntax with
// alignas(...) syntax. However, at least Clang is very accepting nowadays. It
// may be that this macro can be removed entirely.
#if defined(__clang__)
#define PA_ALIGNAS(byte_alignment) alignas(byte_alignment)
#elif defined(COMPILER_MSVC)
// But compilers have trouble mixing __attribute__((...)) syntax with
// alignas(...) syntax.
//
// Doesn't work in clang or gcc:
// struct alignas(16) __attribute__((packed)) S { char c; };
// Works in clang but not gcc:
// struct __attribute__((packed)) alignas(16) S2 { char c; };
// Works in clang and gcc:
// struct alignas(16) S3 { char c; } __attribute__((packed));
//
// There are also some attributes that must be specified *before* a class
// definition: visibility (used for exporting functions/classes) is one of
// these attributes. This means that it is not possible to use alignas() with a
// class that is marked as exported.
#if defined(COMPILER_MSVC)
#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(aligned)
#elif defined(COMPILER_GCC)
#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
#endif
// Tells the compiler a function is using a printf-style format string.
// In case the compiler supports it NO_UNIQUE_ADDRESS evaluates to the C++20
// attribute [[no_unique_address]]. This allows annotating data members so that
// they need not have an address distinct from all other non-static data members
// of its class.
//
// References:
// * https://en.cppreference.com/w/cpp/language/attributes/no_unique_address
// * https://wg21.link/dcl.attr.nouniqueaddr
#if PA_HAS_CPP_ATTRIBUTE(no_unique_address)
#define PA_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define PA_NO_UNIQUE_ADDRESS
#endif
// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter.
// For v*printf functions (which take a va_list), pass 0 for dots_param.
// (This is undocumented but matches what the system C headers do.)
// For member functions, the implicit this parameter counts as index 1.
#if (defined(COMPILER_GCC) || defined(__clang__)) && PA_HAS_ATTRIBUTE(format)
#if defined(COMPILER_GCC) || defined(__clang__)
#define PA_PRINTF_FORMAT(format_param, dots_param) \
__attribute__((format(printf, format_param, dots_param)))
#else
#define PA_PRINTF_FORMAT(format_param, dots_param)
#endif
// WPRINTF_FORMAT is the same, but for wide format strings.
// This doesn't appear to yet be implemented in any compiler.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
#define PA_WPRINTF_FORMAT(format_param, dots_param)
// If available, it would look like:
// __attribute__((format(wprintf, format_param, dots_param)))
// Sanitizers annotations.
#if PA_HAS_ATTRIBUTE(no_sanitize)
#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
@ -106,10 +144,27 @@
// Use this to annotate code that deliberately reads uninitialized data, for
// example a GC scavenging root set pointers from the stack.
#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
// Check a memory region for initializedness, as if it was being used here.
// If any bits are uninitialized, crash with an MSan report.
// Use this to sanitize data which MSan won't be able to track, e.g. before
// passing data to another process via shared memory.
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
__msan_check_mem_is_initialized(p, size)
#else // MEMORY_SANITIZER
#define PA_MSAN_UNPOISON(p, size)
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
#endif // MEMORY_SANITIZER
// Macro useful for writing cross-platform function pointers.
#if !defined(PA_CDECL)
#if BUILDFLAG(IS_WIN)
#define PA_CDECL __cdecl
#else // BUILDFLAG(IS_WIN)
#define PA_CDECL
#endif // BUILDFLAG(IS_WIN)
#endif // !defined(PA_CDECL)
// Macro for hinting that an expression is likely to be false.
#if !defined(PA_UNLIKELY)
#if defined(COMPILER_GCC) || defined(__clang__)
@ -127,6 +182,23 @@
#endif // defined(COMPILER_GCC)
#endif // !defined(PA_LIKELY)
// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#if defined(__has_feature)
#define PA_HAS_FEATURE(FEATURE) __has_feature(FEATURE)
#else
#define PA_HAS_FEATURE(FEATURE) 0
#endif
#if defined(COMPILER_GCC)
#define PA_PRETTY_FUNCTION __PRETTY_FUNCTION__
#elif defined(COMPILER_MSVC)
#define PA_PRETTY_FUNCTION __FUNCSIG__
#else
// See https://en.cppreference.com/w/c/language/function_definition#func
#define PA_PRETTY_FUNCTION __func__
#endif
#if !defined(PA_CPU_ARM_NEON)
#if defined(__arm__)
#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
@ -145,6 +217,63 @@
#endif
#endif
#if defined(__clang__) && PA_HAS_ATTRIBUTE(uninitialized)
// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
// the specified variable.
// Library-wide alternative is
// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in .gn
// file.
//
// See "init_stack_vars" in build/config/compiler/BUILD.gn and
// http://crbug.com/977230
// "init_stack_vars" is enabled for non-official builds and we hope to enable it
// in official build in 2020 as well. The flag writes fixed pattern into
// uninitialized parts of all local variables. In rare cases such initialization
// is undesirable and attribute can be used:
// 1. Degraded performance
// In most cases compiler is able to remove additional stores. E.g. if memory is
// never accessed or properly initialized later. Preserved stores mostly will
// not affect program performance. However if compiler failed on some
// performance critical code we can get a visible regression in a benchmark.
// 2. memset, memcpy calls
// Compiler may replaces some memory writes with memset or memcpy calls. This is
// not -ftrivial-auto-var-init specific, but it can happen more likely with the
// flag. It can be a problem if code is not linked with C run-time library.
//
// Note: The flag is security risk mitigation feature. So in future the
// attribute uses should be avoided when possible. However to enable this
// mitigation on the most of the code we need to be less strict now and minimize
// number of exceptions later. So if in doubt feel free to use attribute, but
// please document the problem for someone who is going to cleanup it later.
// E.g. platform, bot, benchmark or test name in patch description or next to
// the attribute.
#define PA_STACK_UNINITIALIZED __attribute__((uninitialized))
#else
#define PA_STACK_UNINITIALIZED
#endif
// Attribute "no_stack_protector" disables -fstack-protector for the specified
// function.
//
// "stack_protector" is enabled on most POSIX builds. The flag adds a canary
// to each stack frame, which on function return is checked against a reference
// canary. If the canaries do not match, it's likely that a stack buffer
// overflow has occurred, so immediately crashing will prevent exploitation in
// many cases.
//
// In some cases it's desirable to remove this, e.g. on hot functions, or if
// we have purposely changed the reference canary.
#if defined(COMPILER_GCC) || defined(__clang__)
#if PA_HAS_ATTRIBUTE(__no_stack_protector__)
#define PA_NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
#else
#define PA_NO_STACK_PROTECTOR \
__attribute__((__optimize__("-fno-stack-protector")))
#endif
#else
#define PA_NO_STACK_PROTECTOR
#endif
// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
// to Clang which control what code paths are statically analyzed,
// and is meant to be used in conjunction with assert & assert-like functions.
@ -214,6 +343,15 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#define PA_TRIVIAL_ABI
#endif
// Marks a member function as reinitializing a moved-from variable.
// See also
// https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
#if defined(__clang__) && PA_HAS_ATTRIBUTE(reinitializes)
#define PA_REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
#else
#define PA_REINITIALIZES_AFTER_MOVE
#endif
// Requires constant initialization. See constinit in C++20. Allows to rely on a
// variable being initialized before execution, and not requiring a global
// constructor.
@ -225,8 +363,10 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#endif
#if defined(__clang__)
#define PA_GSL_OWNER [[gsl::Owner]]
#define PA_GSL_POINTER [[gsl::Pointer]]
#else
#define PA_GSL_OWNER
#define PA_GSL_POINTER
#endif
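Several of the annotations in this header are easiest to read from a call site. A hedged sketch of how two of them are typically applied, written with the raw GCC/Clang attributes they usually expand to; the type and function names here are invented for the example:

#include <cstdarg>
#include <cstdio>

struct ExampleEmptyPolicy {};  // a stateless policy type

struct ExampleBuffer {
  // PA_NO_UNIQUE_ADDRESS (the C++20 [[no_unique_address]] attribute) lets the
  // empty member share an address with |data| instead of forcing padding.
  [[no_unique_address]] ExampleEmptyPolicy policy;
  char data[16];
};

// PA_PRINTF_FORMAT(1, 2): argument 1 is the printf-style format string and the
// variadic arguments start at position 2, so -Wformat can check call sites.
__attribute__((format(printf, 1, 2))) void ExampleLog(const char* fmt, ...) {
  std::va_list args;
  va_start(args, fmt);
  std::vfprintf(stderr, fmt, args);
  va_end(args);
}

// ExampleLog("read %d bytes", 16);    // OK
// ExampleLog("read %d bytes", "16");  // -Wformat: %d given a const char*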

View File

@ -6,6 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {

View File

@ -1,33 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
namespace partition_alloc::internal::base {
// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
// supported C++ version is C++17.
#if defined(__cpp_lib_is_constant_evaluated) && \
__cpp_lib_is_constant_evaluated >= 201811L
#include <type_traits>
using std::is_constant_evaluated;
#else
// Implementation of C++20's std::is_constant_evaluated.
//
// References:
// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
// - https://wg21.link/meta.const.eval
constexpr bool is_constant_evaluated() noexcept {
return __builtin_is_constant_evaluated();
}
#endif
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
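The removed helper above exists so that C++17 callers can branch on constant evaluation the way C++20 code does with std::is_constant_evaluated(). A sketch of a typical call site, assuming a GCC/Clang toolchain that provides the builtins used by the fallback; the function name is invented:

#include <cstdint>

constexpr uint32_t ExamplePopCount(uint32_t x) {
  if (__builtin_is_constant_evaluated()) {
    // Portable loop that is valid in constant expressions.
    uint32_t count = 0;
    while (x) {
      x &= x - 1;  // clear the lowest set bit
      ++count;
    }
    return count;
  }
  // Run-time path: let the compiler emit a popcount instruction.
  return static_cast<uint32_t>(__builtin_popcount(x));
}

static_assert(ExamplePopCount(0b1011u) == 3);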

View File

@ -8,6 +8,7 @@
#include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
namespace partition_alloc::internal::base::debug {

View File

@ -107,6 +107,7 @@
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
// Windows-style drive letter support and pathname separator characters can be

View File

@ -15,6 +15,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
#include "build/build_config.h"

View File

@ -0,0 +1,22 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
namespace base {
class LapTimer;
} // namespace base
namespace partition_alloc::internal::base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::LapTimer;
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_

View File

@ -12,6 +12,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)

View File

@ -10,6 +10,7 @@
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
namespace partition_alloc::internal::base {

View File

@ -10,6 +10,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
namespace partition_alloc {

View File

@ -8,6 +8,7 @@
#include <errno.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {

View File

@ -14,6 +14,7 @@
#include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "build/build_config.h"

View File

@ -13,6 +13,7 @@
#include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "build/build_config.h"

View File

@ -13,6 +13,7 @@
#include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)

View File

@ -70,6 +70,7 @@
#include "base/allocator/partition_allocator/chromeos_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"

View File

@ -134,11 +134,10 @@ struct PA_DEBUGKV_ALIGN DebugKv {
for (int index = 0; index < 8; index++) {
k[index] = key[index];
if (key[index] == '\0') {
if (key[index] == '\0')
break;
}
}
}
};
} // namespace partition_alloc::internal

View File

@ -168,8 +168,27 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8);
#endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#define PA_CONFIG_USE_OOB_POISON() 1
#else
#define PA_CONFIG_USE_OOB_POISON() 0
#endif
// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 0
#endif
// Specifies whether allocation extras need to be added.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#define PA_CONFIG_EXTRAS_REQUIRED() 1
#else
#define PA_CONFIG_EXTRAS_REQUIRED() 0
@ -314,7 +333,8 @@ constexpr bool kUseLazyCommit = false;
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error "Dynamically selected pool size is currently not supported"
#endif
#if PA_CONFIG(HAS_MEMORY_TAGGING)
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) || \
PA_CONFIG(HAS_MEMORY_TAGGING)
// TODO(1376980): Address MTE once it's enabled.
#error "Compressed pointers don't support tag in the upper bits"
#endif

View File

@ -82,36 +82,36 @@ constexpr size_t kPartitionCachelineSize = 64;
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
return 16; // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
return 18; // 256 KiB
}
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
return PageAllocationGranularityShift() + 2;
}
#else
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
return 14; // 16 KiB
}
#endif
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageSize() {
return 1 << PartitionPageShift();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageOffsetMask() {
return PartitionPageSize() - 1;
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageBaseMask() {
return ~PartitionPageOffsetMask();
}
@ -131,18 +131,18 @@ constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
// dirty a private page, which is very wasteful if we never actually store
// objects there.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() {
return PartitionPageSize() >> SystemPageShift();
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxSystemPagesPerRegularSlotSpan() {
return NumSystemPagesPerPartitionPage() *
kMaxPartitionPagesPerRegularSlotSpan;
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxRegularSlotSpanSize() {
return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}
@ -179,6 +179,7 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | TagBitmap |
// | Free Slot Bitmap |
// | *Scan State Bitmap |
// | Slot span |
@ -188,6 +189,8 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard pages (16 KiB) |
// +-----------------------+
//
// TagBitmap is only present when
// PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) is true.
// Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
// Bitmap is inserted for partitions that may have quarantine enabled.
//
@ -329,23 +332,23 @@ PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
}
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() {
return kSuperPageSize >> PartitionPageShift();
}
PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool;
}
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools.
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
return kSuperPageSize;
}
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift;
}
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
@ -353,18 +356,18 @@ PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
// allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularity() {
return std::max(PageAllocationGranularity(), PartitionPageSize());
}
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() {
return DirectMapAllocationGranularity() - 1;
}
@ -412,7 +415,7 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// The definition of MaxDirectMapped does only depend on constants that are
// unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
constexpr PA_ALWAYS_INLINE size_t MaxDirectMapped() {
// Subtract kSuperPageSize to accommodate for granularity inside
// PartitionRoot::GetDirectMapReservationSize.
return (1UL << 31) - kSuperPageSize;
@ -500,6 +503,18 @@ using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize;
// Return values to indicate where a pointer is pointing relative to the bounds
// of an allocation.
enum class PtrPosWithinAlloc {
// When PA_USE_OOB_POISON is disabled, end-of-allocation pointers are also
// considered in-bounds.
kInBounds,
#if PA_CONFIG(USE_OOB_POISON)
kAllocEnd,
#endif
kFarOOB
};
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
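The constants in this header are mostly derived from a handful of shifts. A small sketch of those derivations under assumed example values (4 KiB system pages, 16 KiB partition pages, 2 MiB super pages); the real numbers are platform-dependent, as the code above makes explicit, and the names below are invented:

#include <cstddef>

constexpr size_t kExSystemPageShift = 12;     // 4 KiB (assumed)
constexpr size_t kExPartitionPageShift = 14;  // 16 KiB (assumed)
constexpr size_t kExSuperPageShift = 21;      // 2 MiB (assumed)

constexpr size_t kExSystemPageSize = size_t{1} << kExSystemPageShift;
constexpr size_t kExPartitionPageSize = size_t{1} << kExPartitionPageShift;
constexpr size_t kExSuperPageSize = size_t{1} << kExSuperPageShift;

// Mirrors NumSystemPagesPerPartitionPage(): a partition page bundles a small
// power-of-two number of system pages (4 with these example values).
static_assert(kExPartitionPageSize / kExSystemPageSize == 4);

// Mirrors NumPartitionPagesPerSuperPage(): 128 partition pages per super page.
static_assert(kExSuperPageSize >> kExPartitionPageShift == 128);

// Mirrors MaxDirectMapped(): just under 2 GiB, leaving one super page of slack
// for the reservation size computation, while staying partition-page-aligned.
constexpr size_t kExMaxDirectMapped = (size_t{1} << 31) - kExSuperPageSize;
static_assert(kExMaxDirectMapped % kExPartitionPageSize == 0);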

View File

@ -51,9 +51,27 @@ void CheckThatSlotOffsetIsZero(uintptr_t address);
// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
// Smaller types are also allowed.
template <typename Z>
static constexpr bool is_offset_type =
static constexpr bool offset_type =
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
struct PtrDelta {
Z delta_in_bytes;
#if PA_CONFIG(USE_OOB_POISON)
// Size of the element type referenced by the pointer
size_t type_size;
#endif
constexpr PtrDelta(Z delta_in_bytes, size_t type_size)
: delta_in_bytes(delta_in_bytes)
#if PA_CONFIG(USE_OOB_POISON)
,
type_size(type_size)
#endif
{
}
};
} // namespace internal
class PartitionStatsDumper;
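The offset trait and PtrDelta above follow a common SFINAE pattern: restrict a template to integral offsets no wider than ptrdiff_t. A self-contained sketch of the same shape, with invented names:

#include <cstddef>
#include <type_traits>

template <typename Z>
inline constexpr bool is_allowed_offset =
    std::is_integral_v<Z> && sizeof(Z) <= sizeof(std::ptrdiff_t);

template <typename Z, typename = std::enable_if_t<is_allowed_offset<Z>, void>>
struct ExampleDelta {
  Z delta_in_bytes;
};

static_assert(is_allowed_offset<int>);
static_assert(is_allowed_offset<std::size_t>);
static_assert(!is_allowed_offset<float>);

// ExampleDelta<int> ok{16};       // compiles
// ExampleDelta<float> bad{1.0f};  // rejected: float is not an integral offset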

View File

@ -71,32 +71,28 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address,
size_t size,
const char* type_name) {
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) {
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name);
}
}
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
void** out,
unsigned int flags,
size_t size,
const char* type_name) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name);
}
return false;
}
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) {
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address);
}
}
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) {
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address);
}
return false;
}

View File

@ -31,6 +31,8 @@
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -235,6 +237,10 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
PartitionPage<thread_safe>* page = nullptr;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
const PartitionTag tag = root->GetNewPartitionTag();
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
{
// Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several
@ -449,6 +455,10 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
map_extent->reservation_size = reservation_size;
map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &metadata->bucket;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
DirectMapPartitionTagSetValue(slot_start, tag);
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
}
root->lock_.AssertAcquired();
@ -692,6 +702,28 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
// span.
PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PA_DCHECK(root->next_tag_bitmap_page);
uintptr_t next_tag_bitmap_page =
base::bits::AlignUp(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
uintptr_t super_page =
reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
uintptr_t tag_bitmap = super_page + PartitionPageSize();
PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
#endif
SetSystemPagesAccess(root->next_tag_bitmap_page,
next_tag_bitmap_page - root->next_tag_bitmap_page,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kReadWrite));
root->next_tag_bitmap_page = next_tag_bitmap_page;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
return slot_span;
}
@ -757,7 +789,9 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
root->next_super_page = super_page + kSuperPageSize;
uintptr_t state_bitmap =
super_page + PartitionPageSize() +
(is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize());
(is_direct_mapped()
? 0
: ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize());
#if BUILDFLAG(USE_STARSCAN)
PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
const size_t state_bitmap_reservation_size =
@ -862,6 +896,19 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
payload < SuperPagesEndFromExtent(current_extent));
}
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// `root->next_partition_page` currently points at the start of the
// super page payload. We point `root->next_tag_bitmap_page` to the
// corresponding point in the tag bitmap and let the caller
// (slot span allocation) take care of the rest.
root->next_tag_bitmap_page =
base::bits::AlignDown(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
<< "tag bitmap can never intrude on metadata partition page";
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
// and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
// sure to register the super-page after it has been fully initialized.
@ -883,7 +930,8 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
#if BUILDFLAG(USE_FREESLOT_BITMAP)
// Commit the pages for freeslot bitmap.
if (!is_direct_mapped()) {
uintptr_t freeslot_bitmap_addr = super_page + PartitionPageSize();
uintptr_t freeslot_bitmap_addr =
super_page + PartitionPageSize() + ReservedTagBitmapSize();
PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
ScopedSyscallTimer timer{root};
RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
@ -969,10 +1017,14 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
}
if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize &&
root->memory_tagging_enabled())) {
root->IsMemoryTaggingEnabled())) {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size);
}
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(return_slot, slot_size,
root->GetNewPartitionTag());
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr;
@ -989,6 +1041,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot);
}
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(next_slot, slot_size,
root->GetNewPartitionTag());
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry);

View File

@ -73,8 +73,8 @@ struct PartitionBucket {
// |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in SlotSpanMetadata.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
SlowPathAlloc(PartitionRoot<thread_safe>* root,
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PA_NOINLINE uintptr_t SlowPathAlloc(PartitionRoot<thread_safe>* root,
unsigned int flags,
size_t raw_size,
size_t slot_span_alignment,
@ -87,9 +87,8 @@ struct PartitionBucket {
// subsequent PartitionPage to store the raw size. It isn't only metadata
// space though, slot spans that have more than one slot can't have raw size
// stored, because we wouldn't know which slot it applies to.
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize()))
return false;
}
PA_DCHECK((slot_size % SystemPageSize()) == 0);
PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);

View File

@ -25,17 +25,15 @@ namespace partition_alloc::internal {
// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
// for the sub_order_index).
constexpr uint8_t OrderIndexShift(uint8_t order) {
if (order < kNumBucketsPerOrderBits + 1) {
if (order < kNumBucketsPerOrderBits + 1)
return 0;
}
return order - (kNumBucketsPerOrderBits + 1);
}
constexpr size_t OrderSubIndexMask(uint8_t order) {
if (order == kBitsPerSizeT) {
if (order == kBitsPerSizeT)
return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
}
return ((static_cast<size_t>(1) << order) - 1) >>
(kNumBucketsPerOrderBits + 1);
@ -106,10 +104,10 @@ inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
// The class used to generate the bucket lookup table at compile-time.
class BucketIndexLookup final {
public:
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForDenserBuckets(
PA_ALWAYS_INLINE constexpr static uint16_t GetIndexForDenserBuckets(
size_t size);
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexFor8Buckets(size_t size);
PA_ALWAYS_INLINE static constexpr uint16_t GetIndex(size_t size);
PA_ALWAYS_INLINE constexpr static uint16_t GetIndexFor8Buckets(size_t size);
PA_ALWAYS_INLINE constexpr static uint16_t GetIndex(size_t size);
constexpr BucketIndexLookup() {
constexpr uint16_t sentinel_bucket_index = kNumBuckets;
@ -264,12 +262,11 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
//
// We also do not want to go above the index for the max bucketed size.
if (size > kAlignment * kNumBucketsPerOrder &&
index < GetIndexFor8Buckets(kMaxBucketed)) {
index < GetIndexFor8Buckets(kMaxBucketed))
return RoundUpToOdd(index);
} else {
else
return index;
}
}
// static
PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
@ -291,9 +288,8 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
//
// So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
// Distribution A, but to the 2^11 bucket under Distribution B.
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) {
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
}
return BucketIndexLookup::GetIndexForDenserBuckets(size);
}
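The bucket lookup above is driven by the "order" of a size, i.e. the position of its most significant bit, with a few further bits selecting a bucket within that order. A simplified sketch of just the order computation, with an invented name and without the sub-index handling of the real table:

#include <cstddef>
#include <cstdint>

constexpr uint8_t ExampleOrderOf(size_t size) {
  // Number of times the size can be halved before reaching zero, i.e. the
  // index of its most significant bit. Sizes in [2^n, 2^(n+1)) share an order.
  uint8_t order = 0;
  while (size >>= 1) {
    ++order;
  }
  return order;
}

static_assert(ExampleOrderOf(1) == 0);
static_assert(ExampleOrderOf(4096) == 12);
static_assert(ExampleOrderOf(6000) == 12);  // same order; sub-index differs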

View File

@ -23,16 +23,14 @@ inline constexpr unsigned char kCookieValue[kCookieSize] = {
constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
PA_DCHECK(*cookie_ptr == kCookieValue[i]);
}
}
PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
*cookie_ptr = kCookieValue[i];
}
}
#else

View File

@ -41,10 +41,10 @@ class PartitionFreelistEntry;
class EncodedPartitionFreelistEntryPtr {
private:
PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr(
explicit PA_ALWAYS_INLINE constexpr EncodedPartitionFreelistEntryPtr(
std::nullptr_t)
: encoded_(Transform(0)) {}
PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr)
explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
// The encoded pointer stays MTE-tagged.
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
@ -58,7 +58,7 @@ class EncodedPartitionFreelistEntryPtr {
encoded_ = encoded;
}
PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }
explicit PA_ALWAYS_INLINE constexpr operator bool() const { return encoded_; }
// Transform() works the same in both directions, so can be used for
// encoding and decoding.
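The property this comment relies on is that Transform() is an involution: applying it twice restores the original value, so a single function serves as both encoder and decoder. An illustrative involution (XOR with a fixed mask) is sketched below; it is not claimed to be the exact transform PartitionAlloc applies.

#include <cstdint>

constexpr uintptr_t kExampleMask = 0xA5A5A5A5u;

constexpr uintptr_t ExampleTransform(uintptr_t encoded_or_decoded) {
  // XOR with a constant is its own inverse, so encoding and decoding share
  // one implementation, just like Transform() above.
  return encoded_or_decoded ^ kExampleMask;
}

static_assert(ExampleTransform(ExampleTransform(0x12345678u)) == 0x12345678u);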
@ -90,7 +90,7 @@ class EncodedPartitionFreelistEntryPtr {
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
private:
constexpr explicit PartitionFreelistEntry(std::nullptr_t)
explicit constexpr PartitionFreelistEntry(std::nullptr_t)
: encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
,
@ -121,13 +121,13 @@ class PartitionFreelistEntry {
// Emplaces the freelist entry at the beginning of the given slot span, and
// initializes it as null-terminated.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
void* slot_start_tagged) {
// |slot_start_tagged| is MTE-tagged.
auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
return entry;
}
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
uintptr_t slot_start) {
return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
}
@ -138,7 +138,7 @@ class PartitionFreelistEntry {
// This freelist is built for the purpose of thread-cache. This means that we
// can't perform a check that this and the next pointer belong to the same
// super page, as thread-cache spans may chain slots across super pages.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache(
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitForThreadCache(
uintptr_t slot_start,
PartitionFreelistEntry* next) {
auto* entry =
@ -151,7 +151,7 @@ class PartitionFreelistEntry {
//
// This is for testing purposes only! |make_shadow_match| allows you to choose
// if the shadow matches the next pointer properly or is trash.
PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
void* next,
bool make_shadow_match) {
new (SlotStartAddr2Ptr(slot_start))
@ -225,7 +225,7 @@ class PartitionFreelistEntry {
size_t extra,
bool for_thread_cache) const;
PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here,
static PA_ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
const PartitionFreelistEntry* next,
bool for_thread_cache) {
// Don't allow the freelist to be blindly followed to any location.
@ -260,13 +260,12 @@ class PartitionFreelistEntry {
bool not_in_metadata =
(next_address & kSuperPageOffsetMask) >= PartitionPageSize();
if (for_thread_cache) {
if (for_thread_cache)
return shadow_ptr_ok & not_in_metadata;
} else {
else
return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
not_in_metadata;
}
}
EncodedPartitionFreelistEntryPtr encoded_next_;
// This is intended to detect unintentional corruptions of the freelist.
@ -298,9 +297,8 @@ PartitionFreelistEntry::GetNextInternal(size_t extra,
bool for_thread_cache) const {
// GetNext() can be called on discarded memory, in which case |encoded_next_|
// is 0, and none of the checks apply. Don't prefetch nullptr either.
if (IsEncodedNextPtrZero()) {
if (IsEncodedNextPtrZero())
return nullptr;
}
auto* ret = encoded_next_.Decode();
// We rely on constant propagation to remove the branches coming from

View File

@ -13,20 +13,20 @@ namespace partition_alloc::internal {
OomFunction g_oom_handling_function = nullptr;
PA_NOINLINE PA_NOT_TAIL_CALLED void PartitionExcessiveAllocationSize(
size_t size) {
PA_NOINLINE void PA_NOT_TAIL_CALLED
PartitionExcessiveAllocationSize(size_t size) {
PA_NO_CODE_FOLDING();
OOM_CRASH(size);
}
#if !defined(ARCH_CPU_64_BITS)
PA_NOINLINE PA_NOT_TAIL_CALLED void
PA_NOINLINE void PA_NOT_TAIL_CALLED
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
PA_NO_CODE_FOLDING();
OOM_CRASH(size);
}
[[noreturn]] PA_NOT_TAIL_CALLED PA_NOINLINE void
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
PA_NO_CODE_FOLDING();
OOM_CRASH(virtual_size);

View File

@ -23,8 +23,8 @@ namespace internal {
// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
extern OomFunction g_oom_handling_function;
[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
PARTITION_ALLOC) void PartitionExcessiveAllocationSize(size_t size);
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE
void PartitionExcessiveAllocationSize(size_t size);
#if !defined(ARCH_CPU_64_BITS)
[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(

View File

@ -105,9 +105,8 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
root->global_empty_slot_span_ring[current_index];
// The slot span might well have been re-activated, filled up, etc. before we
// get around to looking at it here.
if (slot_span_to_decommit) {
if (slot_span_to_decommit)
slot_span_to_decommit->DecommitIfPossible(root);
}
// We put the empty slot span on our global list of "slot spans that were once
// empty", thus providing it a bit of breathing room to get re-used before we
@ -117,9 +116,8 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
empty_cache_index_ = current_index;
in_empty_cache_ = 1;
++current_index;
if (current_index == root->global_empty_slot_span_ring_size) {
if (current_index == root->global_empty_slot_span_ring_size)
current_index = 0;
}
root->global_empty_slot_span_ring_index = current_index;
// Avoid wasting too much memory on empty slot spans. Note that we only divide
@ -187,9 +185,8 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
// chances of it being filled up again. The old current slot span will be
// the next slot span.
PA_DCHECK(!next_slot_span);
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
next_slot_span = bucket->active_slot_spans_head;
}
bucket->active_slot_spans_head = this;
PA_CHECK(bucket->num_full_slot_spans); // Underflow.
--bucket->num_full_slot_spans;
@ -206,14 +203,12 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
#endif
// If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation.
if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
if (PA_LIKELY(this == bucket->active_slot_spans_head))
bucket->SetNewActiveSlotSpan();
}
PA_DCHECK(bucket->active_slot_spans_head != this);
if (CanStoreRawSize()) {
if (CanStoreRawSize())
SetRawSize(0);
}
RegisterEmpty();
}
@ -264,10 +259,9 @@ void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
in_empty_cache_ = 0;
if (is_empty()) {
if (is_empty())
Decommit(root);
}
}
template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::SortFreelist() {
@ -301,11 +295,10 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
if (!head) {
if (!head)
head = entry;
} else {
else
back->SetNext(entry);
}
back = entry;
}

View File

@ -25,6 +25,8 @@
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -183,8 +185,8 @@ struct SlotSpanMetadata {
// Public API
// Note the matching Alloc() functions are in PartitionPage.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
size_t number_of_freed);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
PA_NOINLINE void FreeSlowPath(size_t number_of_freed);
PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
PA_ALWAYS_INLINE void Free(uintptr_t ptr);
// Appends the passed freelist to the slot-span's freelist. Please note that
@ -226,6 +228,10 @@ struct SlotSpanMetadata {
PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
PA_ALWAYS_INLINE size_t GetRawSize() const;
// Only meaningful when `this` refers to a slot span in a direct map
// bucket.
PA_ALWAYS_INLINE PartitionTag* DirectMapMTETag();
PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
return freelist_head;
}
@ -345,6 +351,13 @@ struct SubsequentPageMetadata {
// the first one is used to store slot information, but the second one is
// available for extra information)
size_t raw_size;
// Specific to when `this` is used in a direct map bucket. Since direct
// maps don't have as many tags as the typical normal bucket slot span,
// we can get away with just hiding the sole tag in here.
//
// See `//base/memory/mtecheckedptr.md` for details.
PartitionTag direct_map_tag;
};
// Each partition page has metadata associated with it. The metadata of the
@ -441,14 +454,14 @@ PartitionSuperPageToExtent(uintptr_t super_page) {
// Size that should be reserved for state bitmap (if present) inside a super
// page. Elements of a super page are partition-page-aligned, hence the returned
// size is a multiple of partition page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
}
// Size that should be committed for state bitmap (if present) inside a super
// page. It is a multiple of system page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
}
@ -458,7 +471,8 @@ CommittedStateBitmapSize() {
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
(IsManagedByNormalBuckets(super_page)
? ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize()
: 0);
}
@ -470,18 +484,28 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
#else // BUILDFLAG(USE_STARSCAN)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() {
return 0ull;
}
#endif // BUILDFLAG(USE_STARSCAN)
// Returns the address of the tag bitmap of the `super_page`. Caller must ensure
// that bitmap exists.
PA_ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
PA_DCHECK(IsReservationStart(super_page));
// Skip over the guard pages / metadata.
return super_page + PartitionPageSize();
}
PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
bool with_quarantine) {
return PartitionPageSize() +
(is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
(is_managed_by_normal_buckets
? (ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize())
: 0) +
(with_quarantine ? ReservedStateBitmapSize() : 0);
}
@ -717,6 +741,15 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
return subsequent_page_metadata->raw_size;
}
template <bool thread_safe>
PA_ALWAYS_INLINE PartitionTag*
SlotSpanMetadata<thread_safe>::DirectMapMTETag() {
PA_DCHECK(bucket->is_direct_mapped());
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
reinterpret_cast<PartitionPage<thread_safe>*>(this));
return &subsequent_page_metadata->direct_map_tag;
}
template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
PartitionFreelistEntry* new_head) {
@ -927,9 +960,8 @@ void IterateSlotSpans(uintptr_t super_page,
break;
}
slot_span = &page->slot_span_metadata;
if (callback(slot_span)) {
if (callback(slot_span))
return;
}
page += slot_span->bucket->get_pages_per_slot_span();
}
// Each super page must have at least one valid slot span.
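SuperPagePayloadStartOffset() above is plain offset arithmetic: the payload begins after the metadata partition page, plus the tag and freeslot bitmaps for normal buckets, plus the state bitmap when quarantine is in use. A hedged numeric sketch, assuming 16 KiB partition pages and placeholder bitmap sizes (the real values come from the constants referenced in this header):

#include <cstddef>

// Assumed, illustrative sizes; the real ones are derived from
// PartitionPageSize(), ReservedTagBitmapSize(), etc.
constexpr size_t kPartitionPageSize = 16 * 1024;
constexpr size_t kReservedTagBitmapSize = 128 * 1024;   // hypothetical
constexpr size_t kReservedFreeSlotBitmapSize = 0;       // hypothetical
constexpr size_t kReservedStateBitmapSize = 16 * 1024;  // hypothetical

constexpr size_t PayloadStartOffset(bool normal_buckets, bool with_quarantine) {
  return kPartitionPageSize +
         (normal_buckets ? kReservedTagBitmapSize + kReservedFreeSlotBitmapSize
                         : 0) +
         (with_quarantine ? kReservedStateBitmapSize : 0);
}

// Direct map, no quarantine: the payload starts right after the metadata page.
static_assert(PayloadStartOffset(false, false) == 16 * 1024, "");
// Normal buckets with quarantine: metadata page + bitmaps + state bitmap.
static_assert(PayloadStartOffset(true, true) == 160 * 1024, "");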

View File

@ -93,8 +93,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
static constexpr CountType kPtrInc = 0x0000'0002;
#endif
PA_ALWAYS_INLINE explicit PartitionRefCount(
bool needs_mac11_malloc_size_hack);
explicit PartitionRefCount(bool needs_mac11_malloc_size_hack);
// Incrementing the counter doesn't imply any visibility about modified
// memory, hence relaxed atomics. For decrement, visibility is required before
@ -191,9 +190,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
CountType old_count =
count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit)))
DoubleFreeOrCorruptionDetected(old_count);
}
if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) ==
kMemoryHeldByAllocatorBit)) {
@ -228,9 +226,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
PA_ALWAYS_INLINE bool IsAlive() {
bool alive =
count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
if (alive) {
if (alive)
CheckCookieIfSupported();
}
return alive;
}
@ -351,10 +348,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
#endif
};
PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
bool needs_mac11_malloc_size_hack)
PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(bool use_mac11_hack)
: count_(kMemoryHeldByAllocatorBit |
(needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
(use_mac11_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
,
brp_cookie_(CalculateCookie())
@ -407,7 +403,7 @@ static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetPartitionRefCountIndexMultiplierShift() {
return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
}
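The fetch_and() release path above uses a common atomic bit-flag pattern: clearing the allocator-ownership bit returns the previous value, which both exposes double frees and tells the caller whether the allocator held the last reference. A self-contained sketch with illustrative constants, not the real PartitionRefCount layout:

#include <atomic>
#include <cassert>
#include <cstdint>

using CountType = uint32_t;
constexpr CountType kMemoryHeldByAllocatorBit = 0x00000001u;  // illustrative

std::atomic<CountType> count{kMemoryHeldByAllocatorBit};

// Clears the allocator-ownership bit; returns true if the memory can be freed
// right away (the allocator bit was the only thing keeping it alive).
bool Release() {
  CountType old_count =
      count.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
  // If the bit was already clear, this is a double free (or corruption).
  assert(old_count & kMemoryHeldByAllocatorBit);
  return old_count == kMemoryHeldByAllocatorBit;
}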

View File

@ -24,7 +24,6 @@
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
@ -47,9 +46,9 @@
#include <pthread.h>
#endif
#if BUILDFLAG(RECORD_ALLOC_INFO)
namespace partition_alloc::internal {
#if BUILDFLAG(RECORD_ALLOC_INFO)
// Even if this is not hidden behind a BUILDFLAG, it should not use any memory
// when recording is disabled, since it ends up in the .bss section.
AllocInfo g_allocs = {};
@ -58,47 +57,9 @@ void RecordAllocOrFree(uintptr_t addr, size_t size) {
g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
kAllocInfoSize] = {addr, size};
}
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
uintptr_t test_address,
size_t type_size) {
// Required for pointers right past an allocation. See
// |PartitionAllocGetSlotStartInBRPPool()|.
uintptr_t adjusted_address =
orig_address - kPartitionPastAllocationAdjustment;
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
// Don't use |adjusted_address| beyond this point at all. It was needed to
// pick the right slot, but now we're dealing with very concrete addresses.
// Zero it just in case, to catch errors.
adjusted_address = 0;
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
// Double check that ref-count is indeed present.
PA_DCHECK(root->brp_enabled());
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
if (test_address < object_addr || object_end < test_address) {
return PtrPosWithinAlloc::kFarOOB;
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
} else if (object_end - type_size < test_address) {
// Not even a single element of the type referenced by the pointer can fit
// between the pointer and the end of the object.
return PtrPosWithinAlloc::kAllocEnd;
#endif
} else {
return PtrPosWithinAlloc::kInBounds;
}
}
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} // namespace partition_alloc::internal
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
namespace partition_alloc {
@ -344,7 +305,7 @@ namespace {
// more work and larger |slot_usage| array. Lower value would probably decrease
// chances of purging. Not empirically tested.
constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MinPurgeableSlotSize() {
return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
}
@ -905,18 +866,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
PartitionOptions::UseConfigurablePool::kIfAvailable) &&
IsConfigurablePoolAvailable();
PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
#if PA_CONFIG(HAS_MEMORY_TAGGING)
TagViolationReportingMode memory_tagging_mode =
internal::GetMemoryTaggingModeForCurrentThread();
// Memory tagging is not supported in the configurable pool because MTE
// stores tagging information in the high bits of the pointer, which causes
// issues with components like V8's ArrayBuffers that use custom pointer
// representations. All custom representations encountered so far rely on an
// "is in configurable pool?" check, so we use that as a proxy.
flags.memory_tagging_enabled_ =
!flags.use_configurable_pool &&
memory_tagging_mode != TagViolationReportingMode::kUndefined;
#endif
// brp_enabled() is not supported in the configurable pool because
// BRP requires objects to be in a different Pool.
@ -960,6 +909,11 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
PA_CHECK(!brp_enabled());
flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
}
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add one extra byte to each slot's end to allow beyond-the-end
// pointers (crbug.com/1364476).
flags.extras_size += 1;
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#endif // PA_CONFIG(EXTRAS_REQUIRED)
// Re-confirm the above PA_CHECKs, by making sure there are no
@ -1687,5 +1641,4 @@ static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
static_assert(
offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64,
"The lock should not be on the same cacheline as the read-mostly flags");
} // namespace partition_alloc
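RecordAllocOrFree() above is a lock-free ring buffer indexed by an atomic counter; because the backing array is zero-initialized it stays in .bss and costs nothing until something is actually recorded. A standalone sketch with a placeholder capacity:

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kAllocInfoSize = 1 << 16;  // placeholder capacity

struct AllocInfo {
  std::atomic<size_t> index{0};
  struct Entry {
    uintptr_t addr;
    size_t size;
  } allocs[kAllocInfoSize] = {};
};

// Zero-initialized global, so it lives in .bss until written to.
AllocInfo g_allocs;

void RecordAllocOrFree(uintptr_t addr, size_t size) {
  // fetch_add hands each caller a unique slot; the modulo wraps the ring, so
  // old entries are overwritten once the buffer is full.
  g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
                  kAllocInfoSize] = {addr, size};
}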

View File

@ -40,6 +40,7 @@
#include "base/allocator/partition_allocator/chromecast_buildflags.h"
#include "base/allocator/partition_allocator/freeslot_bitmap.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
@ -63,6 +64,8 @@
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
@ -290,9 +293,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
bool use_configurable_pool;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_;
#endif
#if BUILDFLAG(ENABLE_PKEYS)
int pkey;
@ -396,6 +396,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool quarantine_always_for_testing = false;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
partition_alloc::PartitionTag current_partition_tag = 0;
// Points to the end of the committed tag bitmap region.
uintptr_t next_tag_bitmap_page = 0;
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PartitionRoot()
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
@ -557,6 +563,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PA_ALWAYS_INLINE size_t
AllocationCapacityFromRequestedSize(size_t size) const;
PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
// Frees memory from this partition, if possible, by decommitting pages or
// even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
void PurgeMemory(int flags);
@ -685,7 +693,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// If quarantine is enabled and the tag overflows, move the containing slot
// to quarantine, to prevent the attacker from exploiting a pointer that has
// an old tag.
if (PA_LIKELY(memory_tagging_enabled())) {
if (PA_LIKELY(IsMemoryTaggingEnabled())) {
return internal::HasOverflowTag(object);
}
// Default behaviour if MTE is not enabled for this PartitionRoot.
@ -705,7 +713,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return flags.scan_mode == ScanMode::kEnabled;
}
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetDirectMapMetadataAndGuardPagesSize() {
// Because we need to fake a direct-map region to look like a super page, we
// need to allocate more pages around the payload:
@ -718,7 +726,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return 2 * internal::PartitionPageSize();
}
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetDirectMapSlotSize(size_t raw_size) {
// Caller must check that the size is not above the MaxDirectMapped()
// limit before calling. This also guards against integer overflow in the
@ -728,8 +736,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
raw_size, internal::SystemPageSize());
}
PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize(
size_t padded_raw_size) {
static PA_ALWAYS_INLINE size_t
GetDirectMapReservationSize(size_t padded_raw_size) {
// Caller must check that the size is not above the MaxDirectMapped()
// limit before calling. This also guards against integer overflow in the
// calculation here.
@ -819,19 +827,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return flags.use_configurable_pool;
}
// Returns whether MTE is supported for this partition root. Because MTE
// stores tagging information in the high bits of the pointer, it causes
// issues with components like V8's ArrayBuffers which use custom pointer
// representations. All custom representations encountered so far rely on an
// "is in configurable pool?" check, so we use that as a proxy.
bool memory_tagging_enabled() const {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
return flags.memory_tagging_enabled_;
#else
return false;
#endif
}
// To make tests deterministic, it is necessary to uncap the amount of memory
// waste incurred by empty slot spans. Otherwise, the size of various
// freelists, and committed memory becomes harder to reason about (and
@ -840,6 +835,17 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
max_empty_slot_spans_dirty_bytes_shift = 0;
}
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PA_ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
// TODO(crbug.com/1298696): performance is not an issue. We can use
// random tags in lieu of sequential ones.
auto tag = ++current_partition_tag;
tag += !tag; // Avoid 0.
current_partition_tag = tag;
return tag;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Enables the sorting of active slot spans in PurgeMemory().
static void EnableSortActiveSlotSpans();
@ -916,7 +922,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// May return an invalid thread cache.
PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
static internal::Lock& GetEnumeratorLock();
@ -941,11 +946,9 @@ class ScopedSyscallTimer {
~ScopedSyscallTimer() {
root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
if (elapsed_nanos > 0) {
root_->syscall_total_time_ns.fetch_add(
static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
}
uint64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
root_->syscall_total_time_ns.fetch_add(elapsed_nanos,
std::memory_order_relaxed);
}
private:
@ -1041,34 +1044,47 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
}
// Return values to indicate where a pointer is pointing relative to the bounds
// of an allocation.
enum class PtrPosWithinAlloc {
// When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
// are also considered in-bounds.
kInBounds,
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
kAllocEnd,
#endif
kFarOOB
};
// Checks whether `test_address` is in the same allocation slot as
// `orig_address`.
//
// This can be called after adding or subtracting from the `orig_address`
// to produce a different pointer which must still stay in the same allocation.
//
// The `type_size` is the size of the type that the raw_ptr is pointing to,
// which may be the type the allocation is holding or a compatible pointer type
// such as a base class or char*. It is used to detect pointers near the end of
// the allocation but not strictly beyond it.
// Checks whether a given address stays within the same allocation slot after
// modification.
//
// This isn't a general purpose function. The caller is responsible for ensuring
// that the ref-count is in place for this allocation.
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
uintptr_t test_address,
size_t type_size);
template <typename Z>
PA_ALWAYS_INLINE PtrPosWithinAlloc
PartitionAllocIsValidPtrDelta(uintptr_t address, PtrDelta<Z> delta) {
// Required for pointers right past an allocation. See
// |PartitionAllocGetSlotStartInBRPPool()|.
uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
// Don't use |adjusted_address| beyond this point at all. It was needed to
// pick the right slot, but now we're dealing with very concrete addresses.
// Zero it just in case, to catch errors.
adjusted_address = 0;
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
// Double check that ref-count is indeed present.
PA_DCHECK(root->brp_enabled());
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
uintptr_t new_address =
address + static_cast<uintptr_t>(delta.delta_in_bytes);
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
if (new_address < object_addr || object_end < new_address) {
return PtrPosWithinAlloc::kFarOOB;
#if PA_CONFIG(USE_OOB_POISON)
} else if (object_end - delta.type_size < new_address) {
// Not even a single element of the type referenced by the pointer can fit
// between the pointer and the end of the object.
return PtrPosWithinAlloc::kAllocEnd;
#endif
} else {
return PtrPosWithinAlloc::kInBounds;
}
}
PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
@ -1205,6 +1221,21 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
FreeNoHooks(object);
}
// Returns whether MTE is supported for this partition root. Because MTE stores
// tagging information in the high bits of the pointer, it causes issues with
// components like V8's ArrayBuffers which use custom pointer representations.
// All custom representations encountered so far rely on an "is in configurable
// pool?" check, so we use that as a proxy.
template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
const {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
return !flags.use_configurable_pool;
#else
return false;
#endif
}
// static
template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
@ -1249,7 +1280,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
#if PA_CONFIG(HAS_MEMORY_TAGGING)
if (PA_LIKELY(root->memory_tagging_enabled())) {
if (PA_LIKELY(root->IsMemoryTaggingEnabled())) {
const size_t slot_size = slot_span->bucket->slot_size;
if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
// slot_span is untagged at this point, so we have to recover its tag
@ -1276,6 +1307,13 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
PA_PREFETCH(slot_span);
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
partition_alloc::internal::PartitionTagIncrementValue(
slot_start, slot_span->bucket->slot_size);
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_STARSCAN)
// TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
// default.
@ -1509,7 +1547,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
SlotSpan* slot_span) {
// PA_LIKELY: performance-sensitive partitions have a thread cache,
// direct-mapped allocations are uncommon.
ThreadCache* thread_cache = GetThreadCache();
ThreadCache* thread_cache = GetOrCreateThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
!IsDirectMappedBucket(slot_span->bucket))) {
size_t bucket_index =
@ -1766,7 +1804,7 @@ PartitionRoot<thread_safe>::GetPageAccessibility() const {
PageAccessibilityConfiguration::Permissions permissions =
PageAccessibilityConfiguration::kReadWrite;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
if (memory_tagging_enabled()) {
if (IsMemoryTaggingEnabled()) {
permissions = PageAccessibilityConfiguration::kReadWriteTagged;
}
#endif
@ -2236,11 +2274,6 @@ ThreadCache* PartitionRoot<thread_safe>::GetOrCreateThreadCache() {
return thread_cache;
}
template <bool thread_safe>
ThreadCache* PartitionRoot<thread_safe>::GetThreadCache() {
return PA_LIKELY(flags.with_thread_cache) ? ThreadCache::Get() : nullptr;
}
using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
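GetNewPartitionTag() above (and PartitionTagIncrementValue(), which the free path calls) both rely on the `tag += !tag` idiom: when the 8-bit tag wraps past 255 the increment lands on 0, and adding `!tag` bumps it to 1 so the reserved value 0 is never handed out. A tiny standalone illustration:

#include <cassert>
#include <cstdint>

using PartitionTag = uint8_t;

PartitionTag NextTag(PartitionTag current) {
  PartitionTag tag = current;
  ++tag;
  tag += !tag;  // Only non-zero when the increment wrapped to 0.
  return tag;
}

int main() {
  assert(NextTag(1) == 2);
  assert(NextTag(254) == 255);
  assert(NextTag(255) == 1);  // Wrap-around skips the reserved 0.
  return 0;
}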

View File

@ -0,0 +1,144 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
// This file defines types and functions for `MTECheckedPtr<T>` (cf.
// `tagging.h`, which deals with real ARM MTE).
#include <string.h>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
namespace partition_alloc {
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
static_assert(
sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
"sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");
PA_ALWAYS_INLINE PartitionTag* NormalBucketPartitionTagPointer(uintptr_t addr) {
uintptr_t bitmap_base =
internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
const size_t bitmap_end_offset =
internal::PartitionPageSize() + internal::ReservedTagBitmapSize();
PA_DCHECK((addr & internal::kSuperPageOffsetMask) >= bitmap_end_offset);
uintptr_t offset_in_super_page =
(addr & internal::kSuperPageOffsetMask) - bitmap_end_offset;
size_t offset_in_bitmap = offset_in_super_page >>
internal::tag_bitmap::kBytesPerPartitionTagShift
<< internal::tag_bitmap::kPartitionTagSizeShift;
// No need to tag, as the tag bitmap region isn't protected by MTE.
return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
}
PA_ALWAYS_INLINE PartitionTag* DirectMapPartitionTagPointer(uintptr_t addr) {
uintptr_t first_super_page = internal::GetDirectMapReservationStart(addr);
PA_DCHECK(first_super_page) << "not managed by a direct map: " << addr;
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
internal::PartitionSuperPageToMetadataArea<internal::ThreadSafe>(
first_super_page));
return &subsequent_page_metadata->direct_map_tag;
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
// UNLIKELY because direct maps are far less common than normal buckets.
if (PA_UNLIKELY(internal::IsManagedByDirectMap(addr))) {
return DirectMapPartitionTagPointer(addr);
}
return NormalBucketPartitionTagPointer(addr);
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagPointer relates to software MTE
// (i.e. MTECheckedPtr) and it returns a pointer to the tag in memory.
return PartitionTagPointer(UntagPtr(ptr));
}
namespace internal {
PA_ALWAYS_INLINE void DirectMapPartitionTagSetValue(uintptr_t addr,
PartitionTag value) {
*DirectMapPartitionTagPointer(addr) = value;
}
PA_ALWAYS_INLINE void NormalBucketPartitionTagSetValue(uintptr_t slot_start,
size_t size,
PartitionTag value) {
PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
PA_DCHECK((slot_start % tag_bitmap::kBytesPerPartitionTag) == 0);
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = NormalBucketPartitionTagPointer(slot_start);
if (sizeof(PartitionTag) == 1) {
memset(tag_ptr, value, tag_count);
} else {
while (tag_count-- > 0)
*tag_ptr++ = value;
}
}
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
return *PartitionTagPointer(ptr);
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {
PartitionTag tag = *PartitionTagPointer(slot_start);
PartitionTag new_tag = tag;
++new_tag;
new_tag += !new_tag; // Avoid 0.
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
// This verifies that tags for the entire slot have the same value and that
// |size| doesn't exceed the slot size.
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = PartitionTagPointer(slot_start);
while (tag_count-- > 0) {
PA_DCHECK(tag == *tag_ptr);
tag_ptr++;
}
#endif
NormalBucketPartitionTagSetValue(slot_start, size, new_tag);
}
} // namespace internal
#else // No-op versions
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
PA_NOTREACHED();
return nullptr;
}
namespace internal {
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
return 0;
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {}
} // namespace internal
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
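NormalBucketPartitionTagPointer() above maps a slot address to its tag by taking the address's offset past the metadata page and the tag bitmap, then scaling it down by the bytes-per-tag ratio. A hedged worked example, assuming 2 MiB super pages, 16 KiB partition pages, a 128 KiB reserved tag bitmap and one 1-byte tag per 16 bytes (illustrative values; the real ones depend on the build configuration):

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kSuperPageSize = 2 * 1024 * 1024;
constexpr uintptr_t kSuperPageBaseMask = ~(kSuperPageSize - 1);
constexpr uintptr_t kSuperPageOffsetMask = kSuperPageSize - 1;
constexpr size_t kPartitionPageSize = 16 * 1024;
constexpr size_t kReservedTagBitmapSize = 128 * 1024;  // hypothetical
constexpr size_t kBytesPerPartitionTagShift = 4;       // one tag per 16 bytes
constexpr size_t kPartitionTagSizeShift = 0;           // 1-byte tags

// Same arithmetic as NormalBucketPartitionTagPointer(), but returns an address
// value instead of dereferencing real memory.
constexpr uintptr_t TagAddressFor(uintptr_t addr) {
  uintptr_t bitmap_base =
      (addr & kSuperPageBaseMask) + kPartitionPageSize;  // skip the metadata page
  uintptr_t bitmap_end_offset = kPartitionPageSize + kReservedTagBitmapSize;
  uintptr_t offset_in_super_page =
      (addr & kSuperPageOffsetMask) - bitmap_end_offset;
  uintptr_t offset_in_bitmap =
      (offset_in_super_page >> kBytesPerPartitionTagShift)
      << kPartitionTagSizeShift;
  return bitmap_base + offset_in_bitmap;
}

// A slot 256 KiB into a super page at 0x40000000 lands on byte 7168 of the
// bitmap: (256 KiB - 144 KiB) / 16 = 7168.
static_assert(TagAddressFor(0x40000000 + 0x40000) ==
                  0x40000000 + 16 * 1024 + 7168,
              "worked example");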

View File

@ -0,0 +1,147 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc::internal {
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
namespace tag_bitmap {
// kPartitionTagSize should be equal to sizeof(PartitionTag).
// PartitionTag is defined in partition_tag.h and static_assert there
// checks the condition.
static constexpr size_t kPartitionTagSizeShift = 0;
static constexpr size_t kPartitionTagSize = 1U << kPartitionTagSizeShift;
static constexpr size_t kBytesPerPartitionTagShift = 4;
// One partition tag is assigned per |kBytesPerPartitionTag| bytes in the slot
// spans.
// +-----------+ 0
// | | ====> 1 partition tag
// +-----------+ kBytesPerPartitionTag
// | | ====> 1 partition tag
// +-----------+ 2*kBytesPerPartitionTag
// ...
// +-----------+ slot_size
static constexpr size_t kBytesPerPartitionTag = 1U
<< kBytesPerPartitionTagShift;
static_assert(
kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
"MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
static constexpr size_t kBytesPerPartitionTagRatio =
kBytesPerPartitionTag / kPartitionTagSize;
static_assert(kBytesPerPartitionTag > 0,
"kBytesPerPartitionTag should be larger than 0");
static_assert(
kBytesPerPartitionTag % kPartitionTagSize == 0,
"kBytesPerPartitionTag should be multiples of sizeof(PartitionTag).");
constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
return (size + unit_size - 1) / unit_size;
}
} // namespace tag_bitmap
// kTagBitmapSize is calculated in the following way:
// (1) kSuperPageSize - 2 * PartitionPageSize() = kTagBitmapSize +
// SlotSpanSize()
// (2) kTagBitmapSize >= SlotSpanSize() / kBytesPerPartitionTag *
// sizeof(PartitionTag)
//--
// (1)' SlotSpanSize() = kSuperPageSize - 2 * PartitionPageSize() -
// kTagBitmapSize
// (2)' SlotSpanSize() <= kTagBitmapSize * Y
// (3)' Y = kBytesPerPartitionTag / sizeof(PartitionTag) =
// kBytesPerPartitionTagRatio
//
// kTagBitmapSize * Y >= kSuperPageSize - 2 * PartitionPageSize() - kTagBitmapSize
// (1 + Y) * kTagBitmapSize >= kSuperPageSize - 2 * PartitionPageSize()
// Finally,
// kTagBitmapSize >= (kSuperPageSize - 2 * PartitionPageSize()) / (1 + Y)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerTagBitmap() {
return tag_bitmap::CeilCountOfUnits(
kSuperPageSize / PartitionPageSize() - 2,
tag_bitmap::kBytesPerPartitionTagRatio + 1);
}
// To make guard pages between the tag bitmap and the slot span, calculate the
// number of SystemPages of TagBitmap. If kNumSystemPagesPerTagBitmap *
// SystemPageSize() < kTagBitmapSize, guard pages will be created. (c.f. no
// guard pages if sizeof(PartitionTag) == 2.)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumSystemPagesPerTagBitmap() {
return tag_bitmap::CeilCountOfUnits(
kSuperPageSize / SystemPageSize() -
2 * PartitionPageSize() / SystemPageSize(),
tag_bitmap::kBytesPerPartitionTagRatio + 1);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ActualTagBitmapSize() {
return NumSystemPagesPerTagBitmap() * SystemPageSize();
}
// PartitionPageSize-aligned tag bitmap size.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedTagBitmapSize() {
return PartitionPageSize() * NumPartitionPagesPerTagBitmap();
}
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
static_assert(ActualTagBitmapSize() <= ReservedTagBitmapSize(),
"kActualTagBitmapSize should be smaller than or equal to "
"kReservedTagBitmapSize.");
static_assert(ReservedTagBitmapSize() - ActualTagBitmapSize() <
PartitionPageSize(),
"Unused space in the tag bitmap should be smaller than "
"PartitionPageSize()");
// The region available for slot spans is the remainder of the super page, after
// taking away the first and last partition page (for metadata and guard pages)
// and partition pages reserved for the freeslot bitmap and the tag bitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SlotSpansSize() {
return kSuperPageSize - 2 * PartitionPageSize() - ReservedTagBitmapSize();
}
static_assert(ActualTagBitmapSize() * tag_bitmap::kBytesPerPartitionTagRatio >=
SlotSpansSize(),
"bitmap is large enough to cover slot spans");
static_assert((ActualTagBitmapSize() - PartitionPageSize()) *
tag_bitmap::kBytesPerPartitionTagRatio <
SlotSpansSize(),
"any smaller bitmap wouldn't suffice to cover slots spans");
#endif // PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
#else
constexpr PA_ALWAYS_INLINE size_t NumPartitionPagesPerTagBitmap() {
return 0;
}
constexpr PA_ALWAYS_INLINE size_t ActualTagBitmapSize() {
return 0;
}
constexpr PA_ALWAYS_INLINE size_t ReservedTagBitmapSize() {
return 0;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
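Plugging concrete numbers into the formulas above makes the sizes tangible. Assuming the common 4 KiB system page, 16 KiB partition page, 2 MiB super page configuration (an assumption; these are runtime values on some platforms):

#include <cstddef>

constexpr size_t kSuperPageSize = 2 * 1024 * 1024;
constexpr size_t kSystemPageSize = 4 * 1024;
constexpr size_t kPartitionPageSize = 16 * 1024;
constexpr size_t kBytesPerPartitionTagRatio = 16;  // 16 bytes per 1-byte tag

constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
  return (size + unit_size - 1) / unit_size;
}

// NumPartitionPagesPerTagBitmap(): ceil((128 - 2) / 17) = 8 partition pages,
// i.e. ReservedTagBitmapSize() == 128 KiB.
static_assert(CeilCountOfUnits(kSuperPageSize / kPartitionPageSize - 2,
                               kBytesPerPartitionTagRatio + 1) == 8,
              "");

// NumSystemPagesPerTagBitmap(): ceil((512 - 8) / 17) = 30 system pages,
// i.e. ActualTagBitmapSize() == 120 KiB.
static_assert(CeilCountOfUnits(kSuperPageSize / kSystemPageSize -
                                   2 * kPartitionPageSize / kSystemPageSize,
                               kBytesPerPartitionTagRatio + 1) == 30,
              "");

// SlotSpansSize(): 2 MiB - 2 * 16 KiB - 128 KiB = 1888 KiB of slot spans,
// covered by 120 KiB * 16 = 1920 KiB worth of tags.
static_assert(kSuperPageSize - 2 * kPartitionPageSize - 128 * 1024 ==
                  1888 * 1024,
              "");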

View File

@ -0,0 +1,25 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#include <cstdint>
// This header defines the types for MTECheckedPtr. Canonical
// documentation available at `//base/memory/raw_ptr_mtecheckedptr.md`.
namespace partition_alloc {
// Use 8 bits for the partition tag. This is the "lower" byte of the
// two top bytes in a 64-bit pointer. The "upper" byte of the same
// is reserved for true ARM MTE.
//
// MTECheckedPtr is not yet compatible with ARM MTE, but it is a
// distant goal to have them coexist.
using PartitionTag = uint8_t;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
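As a hedged illustration of the layout described above: an 8-bit tag sitting in the lower of the two top bytes of a 64-bit pointer would occupy bits 48-55, leaving bits 56-63 for hardware MTE. The helpers and exact bit positions below are assumptions for illustration, not MTECheckedPtr's actual packing code:

#include <cstdint>

using PartitionTag = uint8_t;

constexpr int kTagShift = 48;  // the "lower" of the two top bytes
constexpr uint64_t kTagMask = uint64_t{0xff} << kTagShift;

constexpr uint64_t SetTag(uint64_t ptr_bits, PartitionTag tag) {
  return (ptr_bits & ~kTagMask) | (uint64_t{tag} << kTagShift);
}

constexpr PartitionTag GetTag(uint64_t ptr_bits) {
  return static_cast<PartitionTag>((ptr_bits & kTagMask) >> kTagShift);
}

static_assert(GetTag(SetTag(0x00007fff12345678, 0x5a)) == 0x5a, "round trip");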

View File

@ -108,9 +108,8 @@ PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
DWORD saved_error = GetLastError();
void* ret = TlsGetValue(key);
// Only non-zero errors need to be restored.
if (PA_UNLIKELY(saved_error)) {
if (PA_UNLIKELY(saved_error))
SetLastError(saved_error);
}
return ret;
}
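The save/restore above is a general Win32 pattern: TlsGetValue() overwrites the thread's last error even when it succeeds, so code that must stay transparent to the caller's GetLastError() state restores any non-zero value afterwards. A standalone, Windows-only sketch using the same APIs:

#include <windows.h>

// Reads a TLS slot without disturbing the caller-visible last error.
void* TlsGetPreservingLastError(DWORD key) {
  const DWORD saved_error = GetLastError();
  void* ret = TlsGetValue(key);
  // Only non-zero errors need to be restored: on success TlsGetValue() already
  // resets the last error to ERROR_SUCCESS (0).
  if (saved_error != 0) {
    SetLastError(saved_error);
  }
  return ret;
}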

View File

@ -19,21 +19,18 @@ void (*g_on_dll_process_detach)() = nullptr;
void NTAPI PartitionTlsOnThreadExit(PVOID module,
DWORD reason,
PVOID reserved) {
if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH) {
if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH)
return;
}
if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach) {
if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach)
g_on_dll_process_detach();
}
if (g_destructor) {
void* per_thread_data = PartitionTlsGet(g_key);
if (per_thread_data) {
if (per_thread_data)
g_destructor(per_thread_data);
}
}
}
} // namespace

View File

@ -98,9 +98,8 @@ void Wrpkru(uint32_t pkru) {
LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
: saved_pkey_value_(kDefaultPkeyValue) {
if (!PkeySettings::settings.enabled) {
if (!PkeySettings::settings.enabled)
return;
}
saved_pkey_value_ = Rdpkru();
if (saved_pkey_value_ != kDefaultPkeyValue) {
Wrpkru(kAllowAllPkeyValue);
@ -108,9 +107,8 @@ LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
}
LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
if (!PkeySettings::settings.enabled) {
if (!PkeySettings::settings.enabled)
return;
}
if (Rdpkru() != saved_pkey_value_) {
Wrpkru(saved_pkey_value_);
}

File diff suppressed because it is too large

View File

@ -10,7 +10,6 @@
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -27,23 +26,20 @@ template <bool IsAdjustablePtr>
struct RawPtrAsanUnownedImpl {
// Wraps a pointer.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
return ptr;
}
// Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
}
}
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
T* wrapped_ptr) {
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
// ASAN will catch use of dereferenced ptr without additional probing.
return wrapped_ptr;
}
@ -51,25 +47,21 @@ struct RawPtrAsanUnownedImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
}
return wrapped_ptr;
}
// Unwraps the pointer, without making an assertion on whether memory was
// freed or not.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
T* wrapped_ptr) {
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
return wrapped_ptr;
}
// Upcasts the wrapped pointer.
template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in
@ -82,23 +74,13 @@ struct RawPtrAsanUnownedImpl {
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
return wrapped_ptr + delta_elems;
}
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
return wrapped_ptr - delta_elems;
}
template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) {
return wrapped_ptr1 - wrapped_ptr2;
}
@ -106,7 +88,7 @@ struct RawPtrAsanUnownedImpl {
// Returns a copy of a wrapped pointer, without making an assertion on whether
// memory was freed or not.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
return wrapped_ptr;
}
@ -121,21 +103,19 @@ struct RawPtrAsanUnownedImpl {
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
return ptr;
}
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
T* wrapped_ptr) {
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
return wrapped_ptr;
}
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
};
} // namespace base::internal

View File

@ -64,29 +64,6 @@ void RawPtrBackupRefImpl<AllowDangling>::ReportIfDanglingInternal(
}
}
// static
template <bool AllowDangling>
bool RawPtrBackupRefImpl<AllowDangling>::CheckPointerWithinSameAlloc(
uintptr_t before_addr,
uintptr_t after_addr,
size_t type_size) {
partition_alloc::internal::PtrPosWithinAlloc ptr_pos_within_alloc =
partition_alloc::internal::IsPtrWithinSameAlloc(before_addr, after_addr,
type_size);
// No need to check that |new_ptr| is in the same pool, as
// IsPtrWithinSameAlloc() checks that it's within the same allocation, so
// must be the same pool.
PA_BASE_CHECK(ptr_pos_within_alloc !=
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
return ptr_pos_within_alloc ==
partition_alloc::internal::PtrPosWithinAlloc::kAllocEnd;
#else
return false;
#endif
}
template <bool AllowDangling>
bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -98,11 +75,38 @@ bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
->IsAlive();
}
template <bool AllowDangling>
template <typename Z>
partition_alloc::PtrPosWithinAlloc
RawPtrBackupRefImpl<AllowDangling>::IsValidDelta(
uintptr_t address,
partition_alloc::internal::PtrDelta<Z> delta) {
return partition_alloc::internal::PartitionAllocIsValidPtrDelta(address,
delta);
}
// Explicitly instantiates the two BackupRefPtr variants in the .cc. This
// ensures the definitions not visible from the .h are available in the binary.
template struct RawPtrBackupRefImpl</*AllowDangling=*/false>;
template struct RawPtrBackupRefImpl</*AllowDangling=*/true>;
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<size_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<ptrdiff_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<size_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<ptrdiff_t>);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
if (partition_alloc::internal::IsManagedByDirectMap(address)) {
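The explicit instantiations above follow the standard C++ pattern for keeping template member definitions out of the header: the .cc file names every template-argument combination the rest of the binary may link against, so the out-of-line definitions get emitted exactly once. A generic, self-contained sketch of the same pattern (names are illustrative):

// counter.h
template <bool ThreadSafe>
struct Counter {
  int Next();  // Defined in counter.cc, not visible to other translation units.

 private:
  int value_ = 0;
};

// counter.cc
template <bool ThreadSafe>
int Counter<ThreadSafe>::Next() {
  return ++value_;
}

// Without these lines the linker would have no Counter<true>::Next() /
// Counter<false>::Next() symbols to hand to callers in other .cc files.
template struct Counter<true>;
template struct Counter<false>;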

View File

@ -13,7 +13,6 @@
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -37,8 +36,7 @@ struct RawPtrBackupRefImpl {
// threads modify the same smart pointer object without synchronization, a
// data race will occur.
private:
PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
static PA_ALWAYS_INLINE bool IsSupportedAndNotNull(uintptr_t address) {
// There are many situations where the compiler can prove that
// `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
// way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
@ -89,7 +87,7 @@ struct RawPtrBackupRefImpl {
return is_in_brp_pool;
}
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#if PA_CONFIG(USE_OOB_POISON)
// Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by
// one byte.
#if defined(ARCH_CPU_X86_64)
@ -103,36 +101,32 @@ struct RawPtrBackupRefImpl {
#endif
template <typename T>
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
~OOB_POISON_BIT);
}
template <typename T>
PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
static PA_ALWAYS_INLINE bool IsPtrOOB(T* ptr) {
return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
OOB_POISON_BIT;
}
template <typename T>
PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
static PA_ALWAYS_INLINE T* PoisonOOBPtr(T* ptr) {
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
OOB_POISON_BIT);
}
#else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#else // PA_USE_OOB_POISON
template <typename T>
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
return ptr;
}
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#endif // PA_USE_OOB_POISON
public:
// Wraps a pointer.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return ptr;
}
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -163,10 +157,7 @@ struct RawPtrBackupRefImpl {
// Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return;
}
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -186,13 +177,9 @@ struct RawPtrBackupRefImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#if PA_CONFIG(USE_OOB_POISON)
PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
#endif
uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
@ -208,13 +195,9 @@ struct RawPtrBackupRefImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#if PA_CONFIG(USE_OOB_POISON)
// Some code uses invalid pointer values as indicators, so those values must
// be passed through unchanged during extraction. The following check will
// pass invalid values through if those values do not fall within the BRP
@ -227,18 +210,14 @@ struct RawPtrBackupRefImpl {
// OOB conditions, e.g., in code that extracts an end-of-allocation pointer
// for use in a loop termination condition. The poison bit would make that
// pointer appear to reference a very high address.
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#endif // PA_CONFIG(USE_OOB_POISON)
return unpoisoned_ptr;
}
// Unwraps the pointer, without making an assertion on whether memory was
// freed or not.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
// This may be used for unwrapping an end-of-allocation pointer to be used
// as an endpoint in an iterative algorithm, so this removes the OOB poison
// bit.
@ -247,7 +226,7 @@ struct RawPtrBackupRefImpl {
// Upcasts the wrapped pointer.
template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in
@ -255,12 +234,53 @@ struct RawPtrBackupRefImpl {
return wrapped_ptr;
}
// Verify the pointer stayed in the same slot, and return the poisoned version
// of `new_ptr` if OOB poisoning is enabled.
template <typename T>
PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
T* unpoisoned_ptr,
T* new_ptr) {
// Advance the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
T* new_ptr = unpoisoned_ptr + delta_elems;
// First check if the new address didn't migrate in/out the BRP pool, and
// that it lands within the same allocation. An end-of-allocation address is
// ok, too, and that may lead to the pointer being poisoned if the relevant
// feature is enabled. These checks add a non-trivial cost, but they're
// cheaper and more secure than the previous implementation that rewrapped
// the pointer (wrapped the new pointer and unwrapped the old one).
//
// Note, the value of these checks goes beyond OOB protection. They're
// important for integrity of the BRP algorithm. Without these, an attacker
// could make the pointer point to another allocation, and cause its
// ref-count to go to 0 upon this pointer's destruction, even though there
// may be another pointer still pointing to it, thus making it lose the BRP
// protection prematurely.
uintptr_t address = partition_alloc::UntagPtr(unpoisoned_ptr);
// TODO(bartekn): Consider adding support for non-BRP pools too (without
// removing the cross-pool migration check).
if (IsSupportedAndNotNull(address)) {
auto ptr_pos_within_alloc = IsValidDelta(
address, delta_elems * static_cast<Z>(sizeof(T)), sizeof(T));
// No need to check that |new_ptr| is in the same pool, as IsValidDelta()
// checks that it's within the same allocation, so it must be in the same pool.
PA_BASE_CHECK(ptr_pos_within_alloc !=
partition_alloc::PtrPosWithinAlloc::kFarOOB);
#if PA_CONFIG(USE_OOB_POISON)
if (ptr_pos_within_alloc ==
partition_alloc::PtrPosWithinAlloc::kAllocEnd) {
new_ptr = PoisonOOBPtr(new_ptr);
}
#endif
} else {
// Check that the new address didn't migrate into the BRP pool, as it
// would result in more pointers pointing to an allocation than its
// ref-count reflects.
PA_BASE_CHECK(!IsSupportedAndNotNull(partition_alloc::UntagPtr(new_ptr)));
}
return new_ptr;
#else // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// In the "before allocation" mode, on 32-bit, we can run into a problem
// that the end-of-allocation address could fall outside of
// PartitionAlloc's pools, if this is the last slot of the super page,
@ -285,99 +305,26 @@ struct RawPtrBackupRefImpl {
// This problem doesn't exist in the "previous slot" mode, or any mode that
// involves putting extras after the allocation, because the
// end-of-allocation address belongs to the same slot.
static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT));
// First check if the new address didn't migrate in/out the BRP pool, and
// that it lands within the same allocation. An end-of-allocation address is
// ok, too, and that may lead to the pointer being poisoned if the relevant
// feature is enabled. These checks add a non-trivial cost, but they're
// cheaper and more secure than the previous implementation that rewrapped
// the pointer (wrapped the new pointer and unwrapped the old one).
//
// Note, the value of these checks goes beyond OOB protection. They're
// important for integrity of the BRP algorithm. Without these, an attacker
// could make the pointer point to another allocation, and cause its
// ref-count to go to 0 upon this pointer's destruction, even though there
// may be another pointer still pointing to it, thus making it lose the BRP
// protection prematurely.
const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
// TODO(bartekn): Consider adding support for non-BRP pools too (without
// removing the cross-pool migration check).
if (IsSupportedAndNotNull(before_addr)) {
constexpr size_t size = sizeof(T);
[[maybe_unused]] const bool is_end =
CheckPointerWithinSameAlloc(before_addr, after_addr, size);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
if (is_end) {
new_ptr = PoisonOOBPtr(new_ptr);
}
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
} else {
// Check that the new address didn't migrate into the BRP pool, as it
// would result in more pointers pointing to an allocation than its
// ref-count reflects.
PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
}
return new_ptr;
}
// Advance the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr + delta_elems;
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr + delta_elems);
}
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr - delta_elems;
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr - delta_elems);
static_assert(false);
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
}
template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr1 - wrapped_ptr2;
}
T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
if (partition_alloc::internal::base::is_constant_evaluated()) {
return unpoisoned_ptr1 - unpoisoned_ptr2;
}
uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
// Ensure that both pointers are within the same slot, and pool!
// TODO(bartekn): Consider adding support for non-BRP pool too.
if (IsSupportedAndNotNull(address1)) {
PA_BASE_CHECK(IsSupportedAndNotNull(address2));
PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
address2, address1, sizeof(T)) !=
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
PA_BASE_CHECK(IsValidDelta(address2, address1 - address2, sizeof(T)) !=
partition_alloc::PtrPosWithinAlloc::kFarOOB);
} else {
PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
}
#endif // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
return unpoisoned_ptr1 - unpoisoned_ptr2;
}
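For orientation, a brief illustration of the subtraction check above. This is a hedged sketch, not code from this diff: it assumes a Chromium build with BackupRefPtr enabled and BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK) on, and that `operator-` on raw_ptr routes through GetDeltaElems() as the declarations above suggest.
#include "base/memory/raw_ptr.h"
void SubtractSameSlotOnly() {
  int* a = new int[16];              // Served by PartitionAlloc in Chromium builds.
  raw_ptr<int> first = a;
  raw_ptr<int> last = a + 15;
  auto n = last - first;             // Same slot: both addresses pass the BRP-pool and
                                     // IsPtrWithinSameAlloc() checks, so n == 15.
  int local = 0;
  raw_ptr<int> stray = &local;       // Stack address: not in the BRP pool.
  // auto bad = last - stray;        // Mixed provenance: the PA_BASE_CHECK above fires.
  first = nullptr;
  last = nullptr;
  delete[] a;
}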
@ -385,45 +332,32 @@ struct RawPtrBackupRefImpl {
// memory was freed or not.
// This method increments the reference count of the allocation slot.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
return WrapRawPtr(wrapped_ptr);
}
// Report the current wrapped pointer if pointee isn't alive anymore.
template <typename T>
PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
static PA_ALWAYS_INLINE void ReportIfDangling(T* wrapped_ptr) {
ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
}
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return ptr;
} else {
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
return WrapRawPtr(ptr);
}
}
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
} else {
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
return UnpoisonPtr(wrapped_ptr);
}
}
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
private:
// We've evaluated several strategies (inline nothing, various parts, or
@ -432,24 +366,31 @@ struct RawPtrBackupRefImpl {
// lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
// Therefore, we've extracted the rest into the functions below and marked
// them as PA_NOINLINE to prevent unintended LTO effects.
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
uintptr_t address);
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
void AcquireInternal(uintptr_t address);
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
void ReleaseInternal(uintptr_t address);
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
bool IsPointeeAlive(uintptr_t address);
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
void ReportIfDanglingInternal(uintptr_t address);
template <
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE partition_alloc::PtrPosWithinAlloc
IsValidDelta(uintptr_t address, Z delta_in_bytes, size_t type_size) {
using delta_t = std::conditional_t<std::is_signed_v<Z>, ptrdiff_t, size_t>;
partition_alloc::internal::PtrDelta<delta_t> ptr_delta(delta_in_bytes,
type_size);
// CHECK if `before_addr` and `after_addr` are in the same allocation, for a
// given `type_size`.
// If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, return whether `after_addr`
// landed exactly on the end-of-allocation address.
// If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, return false.
PA_NOINLINE static PA_COMPONENT_EXPORT(
RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
uintptr_t after_addr,
size_t type_size);
return IsValidDelta(address, ptr_delta);
}
template <typename Z>
static PA_COMPONENT_EXPORT(RAW_PTR)
PA_NOINLINE partition_alloc::PtrPosWithinAlloc
IsValidDelta(uintptr_t address,
partition_alloc::internal::PtrDelta<Z> delta);
};
} // namespace base::internal
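To make the effect of the Advance()/Retreat() verification concrete, here is a hedged usage sketch. It assumes a Chromium build where BackupRefPtr is active and `new` is served by PartitionAlloc; in other configurations raw_ptr arithmetic degrades to plain pointer arithmetic and no check fires.
#include "base/memory/raw_ptr.h"
void AdvanceWithinSlot() {
  int* buffer = new int[8];
  raw_ptr<int> p = buffer;   // WrapRawPtr(): acquires a BRP reference on the slot.
  p += 7;                    // Advance(): still within the same slot, so
                             // CheckPointerWithinSameAlloc() passes; landing exactly
                             // on the end-of-allocation address would only poison it.
  // p += 100;               // Far outside the slot: the kFarOOB PA_BASE_CHECK above
                             // is what terminates the process here.
  p = nullptr;               // Drop the reference before the slot is freed.
  delete[] buffer;
}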

View File

@ -11,7 +11,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -46,60 +45,47 @@ PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
struct RawPtrHookableImpl {
// Wraps a pointer.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
}
return ptr;
}
// Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* ptr) {
GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
}
}
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
GetRawPtrHooks()->safely_unwrap_for_dereference(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr;
}
// Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
GetRawPtrHooks()->safely_unwrap_for_extraction(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr;
}
// Unwraps the pointer, without making an assertion on whether memory was
// freed or not.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
GetRawPtrHooks()->unsafely_unwrap_for_comparison(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr;
}
// Upcasts the wrapped pointer.
template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in
@ -112,33 +98,16 @@ struct RawPtrHookableImpl {
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
GetRawPtrHooks()->advance(
reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
}
return wrapped_ptr + delta_elems;
}
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
GetRawPtrHooks()->advance(
reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
}
return wrapped_ptr - delta_elems;
}
template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) {
return wrapped_ptr1 - wrapped_ptr2;
}
@ -146,31 +115,27 @@ struct RawPtrHookableImpl {
// Returns a copy of a wrapped pointer, without making an assertion on whether
// memory was freed or not.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr;
}
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
return ptr;
}
template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
T* wrapped_ptr) {
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
return wrapped_ptr;
}
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
};
} // namespace base::internal
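A sketch of how these hooks could be consumed. The member names (`wrap_ptr`, `release_wrapped_ptr`) and `GetRawPtrHooks()`/`ResetRawPtrHooks()` are taken from the code above; the struct name `RawPtrHooks` and the installer `InstallRawPtrHooks()` are assumptions about the surrounding header, not visible in this diff.
#include <atomic>
#include <cstdint>
namespace {
std::atomic<size_t> g_live_wraps{0};
void OnWrap(uintptr_t /*address*/) { g_live_wraps.fetch_add(1, std::memory_order_relaxed); }
void OnRelease(uintptr_t /*address*/) { g_live_wraps.fetch_sub(1, std::memory_order_relaxed); }
}  // namespace
void EnableWrapCounting() {
  // Start from the currently installed (no-op) hooks so every member stays
  // callable, then override only the two we care about.
  static base::internal::RawPtrHooks hooks = *base::internal::GetRawPtrHooks();
  hooks.wrap_ptr = &OnWrap;
  hooks.release_wrapped_ptr = &OnRelease;
  base::internal::InstallRawPtrHooks(&hooks);  // Assumed installer name.
}
void DisableWrapCounting() {
  base::internal::ResetRawPtrHooks();  // Declared alongside the code above.
}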

View File

@ -73,28 +73,21 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// and aborts. Failure to clear would be indicated by the related death tests
// not CHECKing appropriately.
static constexpr bool need_clear_after_move =
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
std::is_same_v<Impl,
internal::MTECheckedPtrImpl<
internal::MTECheckedPtrImplPartitionAllocSupport>> ||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
// Construct a raw_ref from a pointer, which must not be null.
//
// This function is safe to use with any pointer, as it will CHECK and
// terminate the process if the pointer is null. Do not sidestep this CHECK by
// dereferencing the pointer yourself (e.g. `raw_ref(*ptr)`), as that may itself
// be a null dereference.
PA_ALWAYS_INLINE constexpr static raw_ref from_ptr(T* ptr) noexcept {
PA_RAW_PTR_CHECK(ptr);
return raw_ref(*ptr);
}
// Construct a raw_ref from a reference.
PA_ALWAYS_INLINE constexpr explicit raw_ref(T& p) noexcept
PA_ALWAYS_INLINE explicit raw_ref(T& p) noexcept
: inner_(std::addressof(p)) {}
// Assign a new reference to the raw_ref, replacing the existing reference.
PA_ALWAYS_INLINE constexpr raw_ref& operator=(T& p) noexcept {
PA_ALWAYS_INLINE raw_ref& operator=(T& p) noexcept {
inner_.operator=(&p);
return *this;
}
@ -103,26 +96,24 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
raw_ref(const T&& p) = delete;
raw_ref& operator=(const T&& p) = delete;
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref& p) noexcept
: inner_(p.inner_) {
PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
}
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref&& p) noexcept
: inner_(std::move(p.inner_)) {
PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) {
p.inner_ = nullptr;
}
}
PA_ALWAYS_INLINE constexpr raw_ref& operator=(const raw_ref& p) noexcept {
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_);
return *this;
}
PA_ALWAYS_INLINE constexpr raw_ref& operator=(raw_ref&& p) noexcept {
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) {
@ -132,24 +123,16 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
}
// Deliberately implicit in order to support implicit upcast.
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref<U, PassedTraits>& p) noexcept
PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
: inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
}
// Deliberately implicit in order to support implicit upcast.
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref<U, PassedTraits>&& p) noexcept
PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
: inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) {
@ -157,25 +140,20 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
}
}
static PA_ALWAYS_INLINE raw_ref from_ptr(T* ptr) noexcept {
PA_RAW_PTR_CHECK(ptr);
return raw_ref(*ptr);
}
// Upcast assignment
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
const raw_ref<U, PassedTraits>& p) noexcept {
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_);
return *this;
}
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
raw_ref<U, PassedTraits>&& p) noexcept {
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) {
@ -184,7 +162,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
return *this;
}
PA_ALWAYS_INLINE constexpr T& operator*() const {
PA_ALWAYS_INLINE T& operator*() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator*();
}
@ -193,13 +171,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// rather than GetForDereference semantics (see raw_ptr.h). This should be
// used in place of operator*() when the memory referred to by the reference
// is not immediately going to be accessed.
PA_ALWAYS_INLINE constexpr T& get() const {
PA_ALWAYS_INLINE T& get() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return *inner_.get();
}
PA_ALWAYS_INLINE constexpr T* operator->() const
PA_ATTRIBUTE_RETURNS_NONNULL {
PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator->();
}
@ -212,90 +189,89 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
inner_.ReportIfDangling();
}
PA_ALWAYS_INLINE friend constexpr void swap(raw_ref& lhs,
raw_ref& rhs) noexcept {
friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
swap(lhs.inner_, rhs.inner_);
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator==(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator!=(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator<(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator>(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator<=(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator>=(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator==(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ == &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator!=(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ != &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ < &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ > &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<=(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ <= &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>=(const raw_ref& lhs, const U& rhs) {
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ >= &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator==(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs == rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator!=(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs != rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs < rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs > rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<=(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs <= rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>=(const U& lhs, const raw_ref& rhs) {
friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs >= rhs.inner_;
}
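For readers new to raw_ref, a minimal usage sketch based only on the members visible above (the reference constructor, from_ptr(), get(), operator*, operator-> and the converting constructors); the include path is the conventional Chromium one and is assumed here.
#include "base/memory/raw_ref.h"
struct Widget {
  int id = 0;
};
int GetId(raw_ref<const Widget> w) {
  return w->id;                        // operator-> CHECKs against use-after-move.
}
void Example() {
  Widget widget;
  raw_ref<Widget> ref(widget);         // Constructed from a reference: never null.
  (*ref).id = 42;                      // operator*(): dereference semantics.
  auto ref2 = raw_ref<Widget>::from_ptr(&widget);  // CHECKs that the pointer is non-null.
  Widget& underlying = ref2.get();     // get(): extraction semantics, no immediate access implied.
  underlying.id = 43;
  GetId(ref);                          // Implicit raw_ref<Widget> -> raw_ref<const Widget>
                                       // via the converting constructor shown above.
}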

View File

@ -90,10 +90,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
uint16_t offsets[kReservationOffsetTableLength] = {};
constexpr _ReservationOffsetTable() {
for (uint16_t& offset : offsets) {
for (uint16_t& offset : offsets)
offset = kOffsetTagNotAllocated;
}
}
};
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If pkey support is enabled, we need to pkey-tag the tables of the pkey
@ -194,9 +193,8 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
uint16_t* offset_ptr = ReservationOffsetPointer(address);
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
if (*offset_ptr == kOffsetTagNormalBuckets) {
if (*offset_ptr == kOffsetTagNormalBuckets)
return 0;
}
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded
@ -242,9 +240,8 @@ GetDirectMapReservationStart(uintptr_t address,
address);
uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
if (*offset_ptr == kOffsetTagNormalBuckets) {
if (*offset_ptr == kOffsetTagNormalBuckets)
return 0;
}
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
return reservation_start;

View File

@ -189,8 +189,6 @@ BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
using EnableBrpPartitionMemoryReclaimer =
base::StrongAlias<class EnableBrpPartitionMemoryReclaimerTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition =
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
@ -204,7 +202,6 @@ using AlternateBucketDistribution =
BASE_EXPORT void ConfigurePartitions(
EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AddDummyRefCount add_dummy_ref_count,

View File

@ -584,7 +584,6 @@ void EnablePartitionAllocMemoryReclaimer() {
void ConfigurePartitions(
EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AddDummyRefCount add_dummy_ref_count,
@ -693,14 +692,6 @@ void ConfigurePartitions(
// is replaced, it must've been g_original_root.
PA_CHECK(current_aligned_root == g_original_root);
if (enable_brp_memory_reclaimer) {
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(new_root);
if (new_aligned_root != new_root) {
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
new_aligned_root);
}
}
// Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory(
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |

View File

@ -54,9 +54,8 @@ void SpinningMutex::AcquireSpinThenBlock() {
int tries = 0;
int backoff = 1;
do {
if (PA_LIKELY(Try())) {
if (PA_LIKELY(Try()))
return;
}
// Note: Per the intel optimization manual
// (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
// the "pause" instruction is more costly on Skylake Client than on previous

View File

@ -73,11 +73,7 @@ class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
private:
PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
#if PA_CONFIG(HAS_FAST_MUTEX)
void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
#else
PA_ALWAYS_INLINE void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
#endif
// See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
// cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would
@ -123,9 +119,8 @@ PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
// 1. We don't know how much contention the lock would experience
// 2. This may lead to weird-looking code layout when inlined into a caller
// with PA_(UN)LIKELY() annotations.
if (Try()) {
if (Try())
return;
}
return AcquireSpinThenBlock();
}
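The Try()-then-AcquireSpinThenBlock() split above keeps the uncontended path tiny and inlinable while moving the contended path out of line. Below is a generic, self-contained sketch of that shape only, not the PartitionAlloc implementation (which spins with PA_YIELD_PROCESSOR and then blocks in the kernel).
#include <atomic>
#include <thread>
class TinySpinLock {
 public:
  bool Try() {
    // Returns true if the lock was free and is now held by the caller.
    return !locked_.exchange(true, std::memory_order_acquire);
  }
  void Acquire() {
    if (Try()) {
      return;                   // Fast path: kept small so it inlines well.
    }
    AcquireSpinThenBlock();     // Rarely taken; out of line in the real code.
  }
  void Release() { locked_.store(false, std::memory_order_release); }
 private:
  void AcquireSpinThenBlock() {
    int spins = 0;
    while (!Try()) {
      if (++spins < 64) {
        continue;               // Spin briefly first...
      }
      std::this_thread::yield();  // ...then stop burning CPU (the real code
      spins = 0;                  // eventually blocks in the kernel).
    }
  }
  std::atomic<bool> locked_{false};
};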

View File

@ -16,15 +16,14 @@
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
#include "base/allocator/partition_allocator/tagging.h"
namespace partition_alloc {
class StatsReporter;
namespace internal {
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DoubleFreeAttempt();
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE PA_NOT_TAIL_CALLED
void DoubleFreeAttempt();
// PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates
// use-after-free bugs by verifying that there are no pointers in memory which
@ -109,10 +108,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
static void PerformDelayedScan(int64_t delay_in_microseconds);
// Enables safepoints in mutator threads.
PA_ALWAYS_INLINE static void EnableSafepoints();
static void EnableSafepoints();
// Join scan from safepoint in mutator thread. As soon as PCScan is scheduled,
// mutators can join PCScan helping out with clearing and scanning.
PA_ALWAYS_INLINE static void JoinScanIfNeeded();
static void JoinScanIfNeeded();
// Checks if there is a PCScan task currently in progress.
PA_ALWAYS_INLINE static bool IsInProgress();
@ -136,7 +135,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
static void UninitForTesting();
static inline PCScanScheduler& scheduler();
inline static PCScanScheduler& scheduler();
// Registers reporting class.
static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);

View File

@ -35,7 +35,7 @@ struct QuarantineData final {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend {
public:
inline constexpr explicit PCScanSchedulingBackend(PCScanScheduler&);
explicit inline constexpr PCScanSchedulingBackend(PCScanScheduler&);
// No virtual destructor to allow constant initialization of PCScan as
// static global which directly embeds LimitBackend as default backend.
@ -82,7 +82,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LimitBackend final
public:
static constexpr double kQuarantineSizeFraction = 0.1;
inline constexpr explicit LimitBackend(PCScanScheduler&);
explicit inline constexpr LimitBackend(PCScanScheduler&);
bool LimitReached() final;
void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
@ -188,7 +188,7 @@ QuarantineData& PCScanSchedulingBackend::GetQuarantineData() {
constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler)
: PCScanSchedulingBackend(scheduler) {}
PA_ALWAYS_INLINE bool PCScanScheduler::AccountFreed(size_t size) {
bool PCScanScheduler::AccountFreed(size_t size) {
const size_t size_before =
quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed);
return (size_before + size >

View File

@ -14,7 +14,7 @@ namespace partition_alloc::internal {
// Returns the current stack pointer.
// TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t* GetStackPointer();
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE uintptr_t* GetStackPointer();
// Returns the top of the stack using system API.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop();

View File

@ -130,9 +130,8 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
// Randomly tag a region (MTE-enabled systems only). The first 16-byte
// granule is randomly tagged; all other granules in the region are
// then assigned that initial tag via __arm_mte_set_tag.
if (!CheckTagRegionParameters(ptr, sz)) {
if (!CheckTagRegionParameters(ptr, sz))
return nullptr;
}
// __arm_mte_create_random_tag generates a randomly tagged pointer via the
// hardware's random number generator, but does not apply it to the memory.
char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
@ -147,9 +146,8 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
// Increment a region's tag (MTE-enabled systems only), using the tag of the
// first granule.
if (!CheckTagRegionParameters(ptr, sz)) {
if (!CheckTagRegionParameters(ptr, sz))
return nullptr;
}
// Increment ptr's tag.
char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
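For reference, a compact, hedged sketch of the granule-tagging loop this file implements: derive one tag for the region, then stamp every 16-byte granule with it. It assumes an AArch64 target built with MTE support (e.g. -march=armv8.5-a+memtag), a 16-byte-aligned `ptr`, and a size that is a multiple of 16, which is what CheckTagRegionParameters() verifies above.
#include <arm_acle.h>
#include <cstddef>
constexpr size_t kGranule = 16;  // Mirrors kMemTagGranuleSize.
void* RetagRegion(void* ptr, size_t size) {
  // Pick a random allocation tag for the region (hardware RNG), excluding none.
  char* tagged = static_cast<char*>(__arm_mte_create_random_tag(ptr, 0u));
  // Apply that tag to every 16-byte granule the region covers.
  for (size_t i = 0; i < size; i += kGranule) {
    __arm_mte_set_tag(tagged + i);
  }
  return tagged;  // Callers must use the newly tagged pointer from now on.
}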

View File

@ -93,23 +93,19 @@ void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
ThreadCache* previous_head = list_head_;
list_head_ = cache;
cache->next_ = previous_head;
if (previous_head) {
if (previous_head)
previous_head->prev_ = cache;
}
}
void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
internal::ScopedGuard scoped_locker(GetLock());
if (cache->prev_) {
if (cache->prev_)
cache->prev_->next_ = cache->next_;
}
if (cache->next_) {
if (cache->next_)
cache->next_->prev_ = cache->prev_;
}
if (cache == list_head_) {
if (cache == list_head_)
list_head_ = cache->next_;
}
}
void ThreadCacheRegistry::DumpStats(bool my_thread_only,
ThreadCacheStats* stats) {
@ -119,9 +115,8 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only,
internal::ScopedGuard scoped_locker(GetLock());
if (my_thread_only) {
auto* tcache = ThreadCache::Get();
if (!ThreadCache::IsValid(tcache)) {
if (!ThreadCache::IsValid(tcache))
return;
}
tcache->AccumulateStats(stats);
} else {
ThreadCache* tcache = list_head_;
@ -151,9 +146,8 @@ void ThreadCacheRegistry::PurgeAll() {
// the main thread for the partition lock, since it is acquired/released once
// per bucket. By purging the main thread first, we avoid these interferences
// for this thread at least.
if (ThreadCache::IsValid(current_thread_tcache)) {
if (ThreadCache::IsValid(current_thread_tcache))
current_thread_tcache->Purge();
}
{
internal::ScopedGuard scoped_locker(GetLock());
@ -164,9 +158,8 @@ void ThreadCacheRegistry::PurgeAll() {
// point".
// Note that this will not work if the other thread is sleeping forever.
// TODO(lizeb): Handle sleeping threads.
if (tcache != current_thread_tcache) {
if (tcache != current_thread_tcache)
tcache->SetShouldPurge();
}
tcache = tcache->next_;
}
}
@ -224,9 +217,8 @@ void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
// This can be called before *any* thread cache has serviced *any*
// allocation, which can happen in tests, and in theory in non-test code as
// well; in that case there is nothing to do.
if (!tcache) {
if (!tcache)
return;
}
// Setting the global limit while locked, because we need |tcache->root_|.
ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
@ -264,9 +256,8 @@ void ThreadCacheRegistry::RunPeriodicPurge() {
// Can run when there is no thread cache, in which case there is nothing to
// do, and the task should not be rescheduled. This would typically indicate
// a case where the thread cache was never enabled, or got disabled.
if (!tcache) {
if (!tcache)
return;
}
while (tcache) {
cached_memory_approx += tcache->cached_memory_;
@ -325,9 +316,8 @@ void ThreadCache::EnsureThreadSpecificDataInitialized() {
// adding a special-purpose lock.
internal::ScopedGuard scoped_locker(
ThreadCacheRegistry::Instance().GetLock());
if (g_thread_cache_key_created) {
if (g_thread_cache_key_created)
return;
}
bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
PA_CHECK(ok);
@ -343,9 +333,8 @@ void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
auto* old_tcache = ThreadCache::Get();
g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
if (old_tcache) {
if (old_tcache)
ThreadCache::DeleteForTesting(old_tcache);
}
if (root) {
Init(root);
Create(root);
@ -432,9 +421,8 @@ void ThreadCache::SetGlobalLimits(PartitionRoot<>* root, float multiplier) {
// static
void ThreadCache::SetLargestCachedSize(size_t size) {
if (size > ThreadCache::kLargeSizeThreshold) {
if (size > ThreadCache::kLargeSizeThreshold)
size = ThreadCache::kLargeSizeThreshold;
}
largest_active_bucket_index_ =
PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
size,
@ -524,9 +512,8 @@ ThreadCache::~ThreadCache() {
void ThreadCache::Delete(void* tcache_ptr) {
auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
if (!IsValid(tcache)) {
if (!IsValid(tcache))
return;
}
#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
internal::g_thread_cache = nullptr;
@ -630,9 +617,8 @@ void ThreadCache::FillBucket(size_t bucket_index) {
// some objects, then the allocation will be handled normally. Otherwise,
// this goes to the central allocator, which will service the allocation,
// return nullptr or crash.
if (!slot_start) {
if (!slot_start)
break;
}
allocated_slots++;
PutInBucket(bucket, slot_start);
@ -648,9 +634,8 @@ void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
template <bool crash_on_corruption>
void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
// Avoids acquiring the lock needlessly.
if (!bucket.count || bucket.count <= limit) {
if (!bucket.count || bucket.count <= limit)
return;
}
// This serves two purposes: error checking and avoiding stalls when grabbing
// the lock:
@ -732,9 +717,8 @@ void ThreadCache::ResetForTesting() {
size_t ThreadCache::CachedMemory() const {
size_t total = 0;
for (const Bucket& bucket : buckets_) {
for (const Bucket& bucket : buckets_)
total += bucket.count * static_cast<size_t>(bucket.slot_size);
}
return total;
}
@ -754,9 +738,8 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->batch_fill_count += stats_.batch_fill_count;
#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
for (size_t i = 0; i < internal::kNumBuckets + 1; i++) {
for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
}
#endif // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
// cached_memory_ is not necessarily equal to |CachedMemory()| here, since
@ -784,10 +767,9 @@ void ThreadCache::TryPurge() {
// static
void ThreadCache::PurgeCurrentThread() {
auto* tcache = Get();
if (IsValid(tcache)) {
if (IsValid(tcache))
tcache->Purge();
}
}
void ThreadCache::PurgeInternal() {
PurgeInternalHelper<true>();
@ -807,9 +789,8 @@ void ThreadCache::PurgeInternalHelper() {
// |largest_active_bucket_index_| can be lowered at runtime, there may be
// memory already cached in the inactive buckets. They should still be
// purged.
for (auto& bucket : buckets_) {
for (auto& bucket : buckets_)
ClearBucketHelper<crash_on_corruption>(bucket, 0);
}
}
} // namespace partition_alloc

View File

@ -195,9 +195,7 @@ class ReentrancyGuard {
} // namespace internal
#define PA_REENTRANCY_GUARD(x) \
internal::ReentrancyGuard guard { \
x \
}
internal::ReentrancyGuard guard { x }
#else // BUILDFLAG(PA_DCHECK_IS_ON)
@ -495,9 +493,8 @@ PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
ClearBucket(bucket, limit / 2);
}
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
PurgeInternal();
}
*slot_size = bucket.slot_size;
return true;
@ -530,10 +527,9 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// Very unlikely, means that the central allocator is out of memory. Let it
// deal with it (may return 0, may crash).
if (PA_UNLIKELY(!bucket.freelist_head)) {
if (PA_UNLIKELY(!bucket.freelist_head))
return 0;
}
}
PA_DCHECK(bucket.count != 0);
internal::PartitionFreelistEntry* entry = bucket.freelist_head;
@ -631,12 +627,12 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
bucket.count++;
}
PA_ALWAYS_INLINE void ThreadCache::RecordAllocation(size_t size) {
void ThreadCache::RecordAllocation(size_t size) {
thread_alloc_stats_.alloc_count++;
thread_alloc_stats_.alloc_total_size += size;
}
PA_ALWAYS_INLINE void ThreadCache::RecordDeallocation(size_t size) {
void ThreadCache::RecordDeallocation(size_t size) {
thread_alloc_stats_.dealloc_count++;
thread_alloc_stats_.dealloc_total_size += size;
}

View File

@ -82,10 +82,7 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
version_incremental_(StrDupParam(params, 24)),
hardware_(StrDupParam(params, 25)),
is_at_least_t_(GetIntParam(params, 26)),
is_automotive_(GetIntParam(params, 27)),
is_at_least_u_(GetIntParam(params, 28)),
targets_at_least_u_(GetIntParam(params, 29)),
codename_(StrDupParam(params, 30)) {}
is_automotive_(GetIntParam(params, 27)) {}
// static
BuildInfo* BuildInfo::GetInstance() {

View File

@ -146,12 +146,6 @@ class BASE_EXPORT BuildInfo {
bool is_automotive() const { return is_automotive_; }
bool is_at_least_u() const { return is_at_least_u_; }
bool targets_at_least_u() const { return targets_at_least_u_; }
const char* codename() const { return codename_; }
private:
friend struct BuildInfoSingletonTraits;
@ -190,9 +184,6 @@ class BASE_EXPORT BuildInfo {
const char* const hardware_;
const bool is_at_least_t_;
const bool is_automotive_;
const bool is_at_least_u_;
const bool targets_at_least_u_;
const char* const codename_;
};
} // namespace android

View File

@ -60,10 +60,7 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
version_incremental_(""),
hardware_(""),
is_at_least_t_(false),
is_automotive_(false),
is_at_least_u_(false),
targets_at_least_u_(false),
codename_("") {}
is_automotive_(false) {}
// static
BuildInfo* BuildInfo::GetInstance() {

View File

@ -11,7 +11,6 @@
#include "base/base_jni_headers/FieldTrialList_jni.h"
#include "base/lazy_instance.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_list_including_low_anonymity.h"
#include "base/metrics/field_trial_params.h"
using base::android::ConvertJavaStringToUTF8;
@ -78,39 +77,15 @@ static ScopedJavaLocalRef<jstring> JNI_FieldTrialList_GetVariationParameter(
env, parameters[ConvertJavaStringToUTF8(env, jparameter_key)]);
}
// JNI_FieldTrialList_LogActiveTrials() is a static function, which makes
// friending it a hassle because it must be declared in the file that the friend
// declaration is in, but its declaration can't be included in multiple places
// or things get messy and the linker gets mad. This helper class exists only to
// friend the JNI function and is, in turn, friended by
// FieldTrialListIncludingLowAnonymity which allows for the private
// GetActiveFieldTrialGroups() to be reached.
class AndroidFieldTrialListLogActiveTrialsFriendHelper {
private:
friend void ::JNI_FieldTrialList_LogActiveTrials(JNIEnv* env);
static bool AddObserver(base::FieldTrialList::Observer* observer) {
return base::FieldTrialListIncludingLowAnonymity::AddObserver(observer);
}
static void GetActiveFieldTrialGroups(
base::FieldTrial::ActiveGroups* active_groups) {
base::FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups(
active_groups);
}
};
static void JNI_FieldTrialList_LogActiveTrials(JNIEnv* env) {
DCHECK(!g_trial_logger.IsCreated()); // This need only be called once.
LOG(INFO) << "Logging active field trials...";
AndroidFieldTrialListLogActiveTrialsFriendHelper::AddObserver(
&g_trial_logger.Get());
base::FieldTrialList::AddObserver(&g_trial_logger.Get());
// Log any trials that were already active before adding the observer.
std::vector<base::FieldTrial::ActiveGroup> active_groups;
AndroidFieldTrialListLogActiveTrialsFriendHelper::GetActiveFieldTrialGroups(
&active_groups);
base::FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
for (const base::FieldTrial::ActiveGroup& group : active_groups) {
TrialLogger::Log(group.trial_name, group.group_name);
}

View File

@ -12,9 +12,10 @@
#include "base/android/jni_utils.h"
#include "base/base_jni_headers/PiiElider_jni.h"
#include "base/debug/debugging_buildflags.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/threading/thread_local.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/base/attributes.h"
namespace base {
namespace android {
@ -25,7 +26,8 @@ jobject g_class_loader = nullptr;
jmethodID g_class_loader_load_class_method_id = 0;
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
ABSL_CONST_INIT thread_local void* stack_frame_pointer = nullptr;
base::LazyInstance<base::ThreadLocalPointer<void>>::Leaky
g_stack_frame_pointer = LAZY_INSTANCE_INITIALIZER;
#endif
bool g_fatal_exception_occurred = false;
@ -299,13 +301,17 @@ std::string GetJavaExceptionInfo(JNIEnv* env, jthrowable java_throwable) {
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
JNIStackFrameSaver::JNIStackFrameSaver(void* current_fp)
: resetter_(&stack_frame_pointer, current_fp) {}
JNIStackFrameSaver::JNIStackFrameSaver(void* current_fp) {
previous_fp_ = g_stack_frame_pointer.Pointer()->Get();
g_stack_frame_pointer.Pointer()->Set(current_fp);
}
JNIStackFrameSaver::~JNIStackFrameSaver() = default;
JNIStackFrameSaver::~JNIStackFrameSaver() {
g_stack_frame_pointer.Pointer()->Set(previous_fp_);
}
void* JNIStackFrameSaver::SavedFrame() {
return stack_frame_pointer;
return g_stack_frame_pointer.Pointer()->Get();
}
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
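The change above replaces a LazyInstance<ThreadLocalPointer<void>> with an ABSL_CONST_INIT thread_local plus a base::AutoReset member that restores the previous frame pointer on destruction. Below is a small sketch of that pattern, using illustrative names rather than the actual jni_android.cc members.
#include "base/auto_reset.h"
#include "third_party/abseil-cpp/absl/base/attributes.h"
ABSL_CONST_INIT thread_local void* current_frame = nullptr;
class ScopedFrame {
 public:
  explicit ScopedFrame(void* fp) : resetter_(&current_frame, fp) {}
  // No explicit destructor needed: AutoReset restores the previous value when
  // this object goes out of scope, which is what ~JNIStackFrameSaver used to
  // do by hand via ThreadLocalPointer.
  static void* Current() { return current_frame; }
 private:
  const base::AutoReset<void*> resetter_;
};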

Some files were not shown because too many files have changed in this diff.