Compare commits


No commits in common. "369729a74e7a6847b010d8c90ee7598cbccd99d3" and "f032d7911c59a6bcc288519ff30c4381e2912375" have entirely different histories.

1335 changed files with 27296 additions and 23712 deletions


@@ -10,13 +10,13 @@ defaults:
shell: bash
working-directory: src
env:
CACHE_EPOCH: 2
CACHE_EPOCH: 1
CCACHE_MAXSIZE: 200M
CCACHE_MAXFILES: 0
SCCACHE_CACHE_SIZE: 200M
jobs:
cache-toolchains-posix:
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Cache toolchains (Linux, OpenWrt, Android)
@@ -49,7 +49,7 @@ jobs:
wget https://snapshot.debian.org/archive/debian/20220515T152741Z/pool/main/q/qemu/qemu-user-static_7.0%2Bdfsg-6_amd64.deb
fi
cache-toolchains-win:
runs-on: windows-2022
runs-on: windows-2019
steps:
- uses: actions/checkout@v3
- name: Cache toolchains
@@ -79,7 +79,7 @@ jobs:
unzip ninja-win.zip -d ~/bin
fi
cache-toolchains-mac:
runs-on: macos-12
runs-on: macos-11
steps:
- uses: actions/checkout@v3
- uses: actions/cache@v3
@@ -93,7 +93,7 @@ jobs:
- run: EXTRA_FLAGS='target_cpu="arm64"' ./get-clang.sh
linux:
needs: cache-toolchains-posix
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
@@ -160,7 +160,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
android:
needs: cache-toolchains-posix
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
@@ -233,7 +233,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
win:
needs: cache-toolchains-win
runs-on: windows-2022
runs-on: windows-2019
strategy:
fail-fast: false
matrix:
@@ -297,7 +297,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
mac:
needs: cache-toolchains-mac
runs-on: macos-12
runs-on: macos-11
strategy:
fail-fast: false
matrix:
@@ -349,7 +349,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ios:
needs: cache-toolchains-mac
runs-on: macos-12
runs-on: macos-11
strategy:
fail-fast: false
matrix:
@@ -382,7 +382,7 @@ jobs:
- run: ccache -s
openwrt:
needs: cache-toolchains-posix
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:


@@ -1 +1 @@
107.0.5304.87
108.0.5359.94


@@ -225,6 +225,7 @@ Chaobin Zhang <zhchbin@gmail.com>
Charles Vaughn <cvaughn@gmail.com>
Cheng Zhao <zcbenz@gmail.com>
Cheng Yu <yuzichengcode@gmail.com>
Cheung Ho <uioptt24@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Greene <cwgreene@amazon.com>
Chris Harrelson <chrishtr@gmail.com>
@@ -340,6 +341,7 @@ Egor Starkov <egor.starkov@samsung.com>
Ehsan Akhgari <ehsan.akhgari@gmail.com>
Ehsan Akhgari <ehsan@mightyapp.com>
Elan Ruusamäe <elan.ruusamae@gmail.com>
Emil Suleymanov <emil@esnx.xyz>
Ergun Erdogmus <erdogmusergun@gmail.com>
Eric Ahn <byungwook.ahn@gmail.com>
Eric Huang <ele828@gmail.com>
@@ -436,6 +438,7 @@ Heeyoun Lee <heeyoun.lee@samsung.com>
Henrique de Carvalho <decarv.henrique@gmail.com>
Henrique Limas <henrique.ramos.limas@gmail.com>
Himanshu Joshi <h.joshi@samsung.com>
Hiroki Oshima <hiroki.oshima@gmail.com>
Hiroyuki Matsuda <gsittyz@gmail.com>
Hodol Han <bab6ting@gmail.com>
Holger Kraus <kraush@amazon.com>
@@ -1316,6 +1319,7 @@ Yi Zhang <yi.y.zhang@intel.com>
Yizhou Jiang <yizhou.jiang@intel.com>
Yoav Weiss <yoav@yoav.ws>
Yoav Zilberberg <yoav.zilberberg@gmail.com>
Yoichiro Hibara <hibarayoichiro871@gmail.com>
Yong Ling <yongling@tencent.com>
Yong Shin <sy3620@gmail.com>
Yong Wang <ccyongwang@tencent.com>
@@ -1352,6 +1356,7 @@ Zach Bjornson <zbbjornson@gmail.com>
Zachary Capalbo <zach.geek@gmail.com>
Zeno Albisser <zeno.albisser@digia.com>
Zeqin Chen <talonchen@tencent.com>
Zhang Hao <zhanghao.m@bytedance.com>
Zhang Hao <15686357310a@gmail.com>
Zhaoze Zhou <zhaoze.zhou@partner.samsung.com>
Zheda Chen <zheda.chen@intel.com>
@@ -1399,8 +1404,9 @@ Duck Duck Go, Inc. <*@duckduckgo.com>
Endless Mobile, Inc. <*@endlessm.com>
EngFlow, Inc. <*@engflow.com>
Estimote, Inc. <*@estimote.com>
Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
Meta Platforms, Inc. <*@fb.com>
Meta Platforms, Inc. <*@meta.com>
Meta Platforms, Inc. <*@oculus.com>
Google Inc. <*@google.com>
Grammarly, Inc. <*@grammarly.com>
Hewlett-Packard Development Company, L.P. <*@hp.com>

src/DEPS

@@ -72,6 +72,9 @@ vars = {
# TODO(ehmaldonado): Remove this once the bug in gclient is fixed.
'checkout_fuchsia': False,
# Used for downloading the Fuchsia SDK without running hooks.
'checkout_fuchsia_no_hooks': False,
# Pull in Android prebuilts build tools so we can create Java xrefs
'checkout_android_prebuilts_build_tools': False,
@@ -100,6 +103,9 @@ vars = {
# restricted to Googlers only.
'checkout_chromium_fsc_test_dependencies': False,
# By default, src-internal checks out //clank.
'checkout_clank_via_src_internal': True,
# By default, do not check out Google Benchmark. The library is only used by a
# few specialized benchmarks that most developers do not interact with. Will
# be overridden by gclient variables.
@@ -200,21 +206,12 @@ vars = {
# qemu on linux-arm64 machines.
'checkout_fuchsia_for_arm64_host': False,
# Whether to checkout test related data. For compile only builder, we should
# consider using this flag to save some resources.
# This is introduced because of crbug.com/1358788.
'checkout_testdata': True,
# Revision of Crubit (trunk on 2022-08-26). This should typically be the
# same as the revision specified in CRUBIT_REVISION in
# tools/rust/update_rust.py. More details and roll instructions can be
# found in tools/rust/README.md.
'crubit_revision': '2c34caee7c3b4c2dfbcb0e935efcbc05ebc0f61d',
# Run 'vpython_common' hook if this is set.
# TODO(crbug.com/1329052): remove this when we remove .vpython.
'enable_vpython_common_crbug_1329052': True,
# By default, download the fuchsia sdk from the public sdk directory.
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
@@ -253,7 +250,7 @@ vars = {
# luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78',
'luci_go': 'git_revision:9f65ffe719f73af390727d369b342c22fa37ea54',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision.
@@ -280,18 +277,20 @@ vars = {
# Use the experimental version of the RTS model
'checkout_rts_experimental_model': False,
# By default, do not check out the re-client binaries.
'checkout_reclient': False,
# Make Dawn skip its standalone dependencies
'dawn_standalone': False,
# reclient CIPD package version
'reclient_version': 're_client_version:0.78.0.6f1e751-gomaip',
'reclient_version': 're_client_version:0.81.1.0853992-gomaip',
# Enable fetching Rust-related packages.
# Fetch Rust-related packages.
'use_rust': False,
# Fetch dependencies needed to build Rust toolchain. Not needed if developing
# Rust code in Chromium; instead enable use_rust. Only use if building the
# Rust toolchain.
'checkout_rust_toolchain_deps': False,
'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com',
'boringssl_git': 'https://boringssl.googlesource.com',
@@ -305,34 +304,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': '3a8c9bc2f275732b2fd1a566becf421e62fe1f46',
'skia_revision': '7c55be996a81ff9c5c66984c9d4ef85d12a44c8c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': '5a8a4a69ac843a0724f94cc2244b66b51f0f6806',
'v8_revision': '3155b0d10c058d2a9f1d7bba00ad398b3e03b841',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': 'bbf57e6db2fab3ee4c4336d6c73786b73aff28b2',
'angle_revision': 'ceec659ac60b0c8ee9d9c602ca1a878ec1d3a88f',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': '9e96423f7ed2c22eea3274e3f4c20dd5ca80a18c',
'swiftshader_revision': 'b22b1b1f2dddcf5eacc8d2a37e7d27f650e1c1e2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': '6d0a3d5365d04967d67399617acc16bc7e7efe52',
'pdfium_revision': '9d2c662f557544e5edb74a60b52fb297f4c5dfee',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '19009c51bff0706362e824f66a0b189326a1c27d',
'boringssl_revision': '1ee71185a2322dc354bee5e5a0abfb1810a27dc6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:9.20220915.2.1',
'fuchsia_version': 'version:9.20221006.5.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@@ -356,7 +355,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': '8e68439a6ffc9e489a70f2c278a016fe15394abf',
'freetype_revision': '0b62c1e43dc4b0e3c50662aac757e4f7321e5466',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
@@ -364,7 +363,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling HarfBuzz
# and whatever else without interference from each other.
'harfbuzz_revision': 'fa471043fccb94444510e3300ac2573297c82137',
'harfbuzz_revision': '56c467093598ec559a7148b61e112e9de52b7076',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Emoji Segmenter
# and whatever else without interference from each other.
@@ -376,7 +375,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': 'c06765563619e1d881a26f0e74ef20b69c33c287',
'catapult_revision': '4793433248183dd073e608f655204d4acfdc7193',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
@@ -384,7 +383,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': '53692a6c54e3ba615c0641df3ba802bd2e4c2ce7',
'devtools_frontend_revision': '33bb29b551b54b0ac67025e8b3e0ce69352c9504',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
@@ -420,11 +419,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': '6ab02659de463f93a0e723845455bf06d00bb683',
'dawn_revision': 'c84d06e8603ce9c4b5c8d86e42e9ec0acf3bd689',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': 'a80d5908299d3d303b7608207e03320662f4d55f',
'quiche_revision': 'a338ea8277642f6d78022dc8e3aaed182a804413',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@@ -438,17 +437,13 @@ vars = {
# and whatever else without interference from each other.
'wuffs_revision': 'a8205c2fe7564b12fea81ee028ba670112cc7719',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libgifcodec
# and whatever else without interference from each other.
'libgifcodec_revision': 'd06d2a6d42baf6c0c91cacc28df2542a911d05fe',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other.
'libavif_revision': 'e0954237c40ff75dbc79991ea4774853ad09bed7',
'libavif_revision': 'de7e6c0d98abcd6843c4a9bf4cee731141dca566',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': 'bbe77d839756d0207f52a13b371e5daaf273854b',
'nearby_revision': '4bd0337c105c502de845ba9501ad6e0350f613b9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@@ -460,15 +455,19 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'cros_components_revision': 'a0979aacb8744f42ed7abd966a6b0ac7578a73e9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'resultdb_version': 'git_revision:6cc18e2763e180929d70c786b419c1f8e6bcc66c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': '5c3e02e92ae8bbc1bf1001bd9ef0d76e044ddb86',
'libcxxabi_revision': '9572e56a12c88c011d504a707ca94952be4664f9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': '60a480ee1819266cf8054548454f99838583cd76',
'libunwind_revision': '1111799723f6a003e6f52202b9bf84387c552081',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@@ -484,14 +483,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': '3dd1a90db3f7ec955ff5476bd4ee5942f093c6fe',
'ffmpeg_revision': 'b9f01c3c54576330b2cf8918c54d5ee5be8faefe',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': 'e2f63a1a48a3cdcacbfc212236050ca5deeacc30',
'libcxx_revision': '64d36e572d3f9719c5d75011a718f33f11126851',
# GN CIPD package version.
'gn_version': 'git_revision:fff29c1b3f9703ea449f720fe70fa73575ef24e5',
'gn_version': 'git_revision:b9c6c19be95a3863e02f00f1fe403b2502e345b6',
# ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
'ninja_version': 'version:2@1.8.2.chromium.3',
}
# Only these hosts are allowed for dependencies in this DEPS file.
@@ -566,7 +569,15 @@ deps = {
}
],
'dep_type': 'cipd',
'condition': 'checkout_reclient',
},
'src/third_party/ninja': {
'packages': [
{
'package': 'infra/3pp/tools/ninja/${{platform}}',
'version': Var('ninja_version'),
}
],
'dep_type': 'cipd',
},
'src/third_party/android_rust_toolchain/toolchain': {
'packages': [
@@ -579,6 +590,16 @@ deps = {
# TODO(https://crbug.com/1292038): gate this on use_rust as well as host_os.
'condition': 'host_os == "linux"',
},
'src/third_party/rust_src/src': {
'packages': [
{
'package': 'chromium/third_party/rust_src',
'version': 'version:2@2022-09-14',
},
],
'dep_type': 'cipd',
'condition': 'checkout_rust_toolchain_deps or use_rust',
},
# We don't know target_cpu at deps time. At least until there's a universal
# binary of httpd-php, pull both intel and arm versions in DEPS and then pick
@@ -645,6 +666,17 @@ deps = {
'version': '29MbwZukN0c7nlUhmVKLU6ecK99dCu-ZwYa3ICqbwB0C',
},
],
}
,
'src/third_party/updater/chrome_mac_universal_prod': {
'dep_type': 'cipd',
'condition': 'checkout_mac',
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
'version': 'E3rEUfkgLutRcZKGPJN_yWoC1G-4rTIhzpXGcsUNqCsC',
},
],
},
'src/third_party/updater/chrome_win_x86': {
@@ -653,7 +685,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'em43A4AKh3oOpCoZpuUNcLGHrCXvvhbN0ZmH496fxJIC',
'version': 'rqP-urpwa5NOuHhuLVNHyT9d_Psk1xDc8ELSstaIkUUC',
},
],
},
@@ -664,7 +696,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'DCttrzwwk19ogJm0ex2eqLSWWtSKjvNYwsvCRQsquhAC',
'version': '7nSN9jjsZ507lwEcJQKUFM_Z2wHmjJmU3nzo1s-r8-UC',
},
],
},
@@ -699,7 +731,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'uOq6n_KBa1dlVKW_KFtLKAp0Pm6KyZJegG06QbKLbJUC',
'version': '2yELAOdPaRyB3HuFsiecHXc4zcXVupx9cLa9ZAh-Z2wC',
},
],
},
@@ -710,7 +742,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'wKB8ION1Ag7GToknXXyg9vDJH3-qYafDygZ68-yy-ccC',
'version': 'vuc_q-ghg3H11b1O-ENURYlDO8hrcpCc4AuN1Expx3gC',
},
],
},
@@ -719,7 +751,7 @@ deps = {
'packages': [
{
'package': 'chromium/chrome/test/data/autofill/captured_sites',
'version': 'VM29jX57EDTykZu3syuhUrnvzW2Ss_503ebhvWpYDIMC',
'version': 'JT0XFwfg09pcNqt5n56Ki9hpRPgfGDSuDd1DRGBSpoYC',
}
],
'condition': 'checkout_chromium_autofill_test_dependencies',
@@ -780,17 +812,23 @@ deps = {
'src/chrome/test/data/xr/webvr_info':
Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
'src/clank': {
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'cf43b2bf3206ff908b2d17be5baba31b7b19f5d3',
'condition': 'checkout_android and checkout_src_internal and not checkout_clank_via_src_internal',
},
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '7627eaaa9d3d78c19f4d09758492061d70425f0f',
'url': Var('chromium_git') + '/website.git' + '@' + '7da061134f35c390ac1549a82704a1762f9a5261',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '4e3e67586cb1cba8aa317f446ca3d367ec8f618b',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + 'abd4e95736740cf61d2c63223396e163d3f08415',
'condition': 'checkout_ios',
},
'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'fa262201b8c29d6160d5773eac72f9a4dccd1c92',
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '904c99f0237920066a507129b0266080db3fda11',
'condition': 'checkout_ios',
},
@@ -800,7 +838,7 @@ deps = {
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '572585b60a0344363e5bf1808558ac064a0937ed',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'e7619686aab6b4e438ab51cd3fe03396b2f872c6',
'condition': 'checkout_ios',
},
@@ -870,7 +908,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'Y3vDcuXgYTosolRwsaziNc_mik48-WLlwUPsUgxeAnkC',
'version': 'gjjgFT1JcYKD-SV0nFWRTeGr2kufiafn_rvDI-gFW0QC',
},
],
'dep_type': 'cipd',
@@ -881,7 +919,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'G2OtoWUEI0-wVZHRLL7YQd7BKr2Jl4jWkAsWBSkWbZYC',
'version': 'xH8MfShB-S7HYkM3gLOUa916ukoEtDJa-8X1bOwfevsC',
},
],
'dep_type': 'cipd',
@@ -892,7 +930,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'h8mT4J8MUHPy0GQ5Qwo8gBOsgyn5pioUXwRFrffdhWcC',
'version': 'SWCvrm3LQO_Y0XbcVVs0q2CJOVKn0ImNLJ0WPQDKx5YC',
},
],
'dep_type': 'cipd',
@@ -960,7 +998,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': 'BbXH8lVPlEbDvjMAQkb2qRAf9NhWCbsGsAHBt3Yv1aEC',
'version': 'H4XoDJ7V7LZUIhvV2qwFHWYJoIY4MJkGQK-Q2vv-dq4C',
},
],
'condition': 'checkout_android',
@@ -1026,7 +1064,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'JpRGSVY_dRFR9MLSN_235CfYK4S9jovg0yqlX98htXIC',
'version': 'DO1bMH_JFEfZXSkAknIs7AfgNh4IwLtJaeMJTdzfuJwC',
},
],
'condition': 'checkout_android',
@@ -1037,7 +1075,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': 'tAZpJUnwhFBJmu1ctEKYMLJp7l3qJufDu7ByW6waq3QC',
'version': 'bUREd_PkCqlp2ww6zmyOLGf0jhqgbnf6GT4V1xkAZ10C',
},
],
'condition': 'checkout_android',
@@ -1088,7 +1126,7 @@ deps = {
Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
'src/third_party/content_analysis_sdk/src':
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'c2d02ffaef3e21df65640bb84cde6ac90d45303e',
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '3d3f3d6f27288d7b0628ae5259238162c5e5ae76',
'src/third_party/dav1d/libdav1d':
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@@ -1175,7 +1213,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '144bfad773686216ea39ba9d359d683c84df7ea9',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '6ebc1b94de0dc73bba385f70ddffab9798fd59e5',
'condition': 'checkout_chromeos',
},
@@ -1193,7 +1231,7 @@ deps = {
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '6365a697667259c41b193f25b2f05205ebf443d8',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '67bca80707449bad87a17de8c937634ff1ab3272',
'condition': 'checkout_linux',
},
@@ -1203,13 +1241,13 @@ deps = {
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'ecfab096397df1f8b266cdb380e057dc31dc0952',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '2c0a8c736a59044e4acc7be9e172343adc5c4310',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': {
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + 'd49a550e367233aa22a765b86583e02ad655abfd',
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '5b416729821b589991d492f0707a087f5a47bb1f',
'condition': 'checkout_src_internal',
},
@@ -1217,10 +1255,10 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '34780d8bd13d0af0cf17a22789ef286e8512594d',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b3bf8d6a13585ff248c079402654647d298de60b',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
'condition': 'checkout_chromeos',
},
@@ -1336,6 +1374,19 @@ deps = {
'dep_type': 'cipd',
},
# Exists for rolling the Fuchsia SDK. Check out of the SDK should always
# rely on the hook running |update_sdk.py| script below.
'src/third_party/fuchsia-sdk/sdk': {
'packages': [
{
'package': Var('fuchsia_sdk_cipd_prefix') + '${{platform}}',
'version': Var('fuchsia_version'),
},
],
'condition': 'checkout_fuchsia_no_hooks',
'dep_type': 'cipd',
},
'src/third_party/hamcrest': {
'packages': [
{
@@ -1411,7 +1462,7 @@ deps = {
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + 'fd35001068a3d726e8184e1721e186dd50fd20e6',
Var('aomedia_git') + '/aom.git' + '@' + '4ebecefe77953f226e620821fe441e24547a121f',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1425,6 +1476,9 @@ deps = {
'src/third_party/ukey2/src':
Var('chromium_git') + '/external/github.com/google/ukey2.git' + '@' + Var('ukey2_revision'),
'src/third_party/cros_components':
Var('chromium_git') + '/external/google3/cros_components.git' + '@' + Var('cros_components_revision'),
# Userspace interface to kernel DRM services.
'src/third_party/libdrm/src': {
'url': Var('chromium_git') + '/chromiumos/third_party/libdrm.git' + '@' + '56f81e6776c1c100c3f627b2c1feb9dcae2aad3c',
@@ -1464,7 +1518,7 @@ deps = {
},
'src/third_party/libunwindstack': {
'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '3c86843ae0f8d560ae0d15b92e34ce88cf83057a',
'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '8740b09bd1f8b81bdba92766afcb9df1d6a1f14e',
'condition': 'checkout_android',
},
@@ -1478,7 +1532,7 @@ deps = {
Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '3e38ce50589d9319badc0501f96d6c5b2b177472',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '00950840d1c9bcbb3eb6ebc5aac5793e71166c8b',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1564,7 +1618,7 @@ deps = {
},
'src/third_party/neon_2_sse/src':
Var('chromium_git') + '/external/github.com/intel/ARM_NEON_2_x86_SSE.git' + '@' + '8dbe2461c89760ac4b204aa0eafb72413a97957d',
Var('chromium_git') + '/external/github.com/intel/ARM_NEON_2_x86_SSE.git' + '@' + 'a15b489e1222b2087007546b4912e21293ea86ff',
'src/third_party/netty-tcnative/src': {
'url': Var('chromium_git') + '/external/netty-tcnative.git' + '@' + '7eeb50be90c9ba0f6afa3375132df63942a0f32d',
@@ -1591,7 +1645,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '9ed6b71cf22ae4558896f2efe254b5ce62d7c7a3',
Var('chromium_git') + '/openscreen' + '@' + '940f6edf1274146fa1bfbda146b98d6aa16a0887',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1608,7 +1662,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + 'eee5bf9ccb87c0f8cf874974e9c8708491f038df',
Var('android_git') + '/platform/external/perfetto.git' + '@' + '280f0b23c5c8b98248cf0ccf3d011c4fd4bb74f5',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1642,7 +1696,7 @@ deps = {
},
'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'cc1c9db8bf5155d89d10d65998cdb226f676492c',
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'd2836d1b1c34c4e330a85a1006201db474bf2c8a',
'src/third_party/r8': {
'packages': [
@@ -1662,7 +1716,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': '4DSZMtRXZITDo6YY90ljp92vzRT0eY52akTTR-2Jh7kC',
'version': 'Q3q0H5fP-O3El4ZE6Mg7vrySyorEF6YrGFs1gRr_PekC',
},
],
'condition': 'checkout_android',
@@ -1740,27 +1794,27 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '620ab3e167b0a64b6732dac16fd0edaf8284cb8e',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '14b52bb67edccf9f250085f83cc0e8aad03824f0',
'src/third_party/turbine': {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 'RXO2k7-PyXvbDjiK9EjbsheQfxXme2n0ABNX-MxR0JcC',
'version': 'rrpgWQ-uylo8c5IPgUVP464LwcVOmt29MqwsR59O_zkC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3811e73c4b6fff9ffc641719727171079c43972b',
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@f310d85385dfddbe1deeb05deda1045593225710',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
# Display server protocol for Linux.
'src/third_party/wayland/src': {
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland.git' + '@' + 'e60398b1755bfcdf09f040d3769131fe0d9762fc',
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland.git' + '@' + 'c7473676b8abc682e820546287044cee3bca9147',
'condition': 'checkout_linux',
},
@@ -1782,20 +1836,18 @@ deps = {
'condition': 'checkout_linux',
},
# Keep this to the same revision as the one .vpython3.
'src/third_party/webdriver/pylib':
Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'd0045ec570c1a77612db35d1e92f05e1d27b4d53',
Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'fc5e7e70c098bfb189a9a74746809ad3c5c34e04',
'src/third_party/webgl/src':
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'dec4d77ee3e525c74ae69f77acf3c9c67dd7e7ce',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'eba1a78f3d741241b0dbee728561b61e9587a686',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'eef098d1c7d50613d8bff2467d674525a9d0c57c',
'src/third_party/libgifcodec':
Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
Var('webrtc_git') + '/src.git' + '@' + '93081d594f7efff72958a79251f53731b99e902b',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@@ -1813,7 +1865,7 @@ deps = {
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '8e3d3359f9bec608e09fac1f7054a2a14b1bd73c',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'e8f74a9763aa36559980a0c2f37f587794995622',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1822,7 +1874,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'XXyhhDL9TuGs6KyzXXakE4eaVnpYMXz8DKbnU5Ew3aAC',
'version': 'c-P40DdzhvukIRQ1DgesE2cEEU8bTLcd4p_e3LL1--sC',
},
],
'dep_type': 'cipd',
@@ -1832,7 +1884,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': 'DCjjZXPp2-aMyq92DomOu_HdsBScNrumV-n3sIRYCfAC',
'version': 'wql7tuE1euGE1rj5JPT6w6ev6KYL3hWzY6HggTHgKZ8C',
},
],
'dep_type': 'cipd',
@@ -1843,7 +1895,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': 'FyHlZdO9A-oMbWsU1rxpkz2dOtU4aOPBSJx9YQF_CLoC',
'version': 'OJJWEma6n1Cw5Ja1DQfdwbOFoFVp6071BB8VjklDcyYC',
},
],
'dep_type': 'cipd',
@@ -1854,7 +1906,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'K3uVqbmFCdJI0hGDotZSM9kPdXQNp7oIGy7pykJInqAC',
'version': '9sTZ5XDqsy_Dj_v4NU3u4fLI_AGANp-zAJ3sof4rkwQC',
},
],
'dep_type': 'cipd',
@@ -1865,7 +1917,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@5dafa2b13dc854da84660cc4b6f5ee6824fb8295',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@a63cd655ad37984fa08e1c95ca73acf55550f10d',
'condition': 'checkout_src_internal',
},
@@ -1884,7 +1936,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'Ogm1R0DYIBfiOnmn4JGWi5dKm9oZ8t36ukBVWlXFp18C',
'version': '9yLWNtuRvV_dzod1dEYo01glLiFRGZ2yqhtYQapXSm4C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1895,7 +1947,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': 'FB1uYhNksQfUy3hgfRbQlj2gRCImGE92AyvjStDtLoUC',
'version': '5MAo0K1bcfWGI4F8OuSplMAOM13HLHbGLL85j8dVU7AC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1906,7 +1958,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'dSxPTWwzHZVCkFCCq4wKiCq4YbzDZLXlLj8fBVzc0X8C',
'version': 'HfCwnAI0440kMmt917E1v9QJdzsNuNVfQQ86ehaVDscC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1917,7 +1969,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': '_bQcxwT8nluFLpn_zf1IP97Fl5-_MaonRAN3xpowqCoC',
'version': 'TaHxBUmYiVurXIHHo8Y5mOh7-SEnHbSCW7fn60_Wm54C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -2452,6 +2504,17 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_annotations': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_annotations',
'version': 'version:2@4.1.1.4.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework': {
'packages': [
{
@@ -2786,7 +2849,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_code_gson_gson',
'version': 'version:2@2.8.0.cr1',
'version': 'version:2@2.9.0.cr1',
},
],
'condition': 'checkout_android',
@@ -3083,7 +3146,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava_android',
'version': 'version:2@31.0-android.cr1',
'version': 'version:2@31.1-android.cr1',
},
],
'condition': 'checkout_android',
@@ -3127,7 +3190,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_javalite',
'version': 'version:2@3.19.3.cr1',
'version': 'version:2@3.21.1.cr1',
},
],
'condition': 'checkout_android',
@@ -3178,6 +3241,83 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_api': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_api',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_binder': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_binder',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_context': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_context',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_core': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_core',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_protobuf_lite': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_protobuf_lite',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_grpc_grpc_stub': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_stub',
'version': 'version:2@1.49.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/io_perfmark_perfmark_api': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/io_perfmark_perfmark_api',
'version': 'version:2@0.25.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/javax_annotation_javax_annotation_api': {
'packages': [
{
@@ -3292,7 +3432,18 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual',
'version': 'version:2@3.22.1.cr1',
'version': 'version:2@3.25.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_checkerframework_checker_util': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_util',
'version': 'version:2@3.25.0.cr1',
},
],
'condition': 'checkout_android',
@@ -3314,7 +3465,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_codehaus_mojo_animal_sniffer_annotations',
'version': 'version:2@1.17.cr1',
'version': 'version:2@1.21.cr1',
},
],
'condition': 'checkout_android',
@@ -4138,7 +4289,6 @@ hooks = [
{
'name': 'test_fonts',
'pattern': '.',
'condition': 'checkout_testdata',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
@@ -4152,7 +4302,6 @@ hooks = [
{
'name': 'opus_test_files',
'pattern': '.',
'condition': 'checkout_testdata',
'action': ['python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_auth',
@@ -4202,7 +4351,6 @@ hooks = [
{
'name': 'wasm_fuzzer',
'pattern': '.',
'condition': 'checkout_testdata',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
@@ -4359,7 +4507,6 @@ hooks = [
{
'name': 'maps_perf_test_load_dataset',
'pattern': '\\.sha1',
'condition': 'checkout_testdata',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
@@ -4405,7 +4552,6 @@ hooks = [
{
'name': 'zucchini_testdata',
'pattern': '.',
'condition': 'checkout_testdata',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
@@ -4776,7 +4922,7 @@ hooks = [
{
'name': 'Fetch PGO profiles for mac arm',
'pattern': '.',
'condition': 'checkout_pgo_profiles and checkout_mac',
'condition': 'checkout_pgo_profiles and (checkout_mac or checkout_android)',
'action': [ 'python3',
'src/tools/update_pgo_profiles.py',
'--target=mac-arm',
@@ -4829,6 +4975,17 @@ hooks = [
'condition': 'host_os == "win"',
'action': ['python3', 'src/build/del_ninja_deps_cache.py'],
},
# Download test resources for the style perftest.
{
'name': 'style_perftest_files',
'pattern': '.',
'action': ['python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_auth',
'--quiet',
'--bucket', 'chromium-style-perftest',
'-d', 'src/third_party/blink/renderer/core/css/perftest_data'],
},
]
# Add any corresponding DEPS files from this list to chromium.exclusions in
@@ -4845,4 +5002,7 @@ recursedeps = [
'src/third_party/vulkan-deps',
# src-internal has its own DEPS file to pull additional internal repos
'src-internal',
# clank has its own DEPS file. This needs to be enabled only when it is
# removed from src-internal's recursedeps.
#'src/clank',
]


@@ -31,6 +31,7 @@ import("//build/config/logging.gni")
import("//build/config/nacl/config.gni")
import("//build/config/profiling/profiling.gni")
import("//build/config/rust.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/sysroot.gni")
import("//build/config/ui.gni")
import("//build/rust/mixed_component.gni")
@@ -198,7 +199,6 @@ mixed_component("base") {
"allocator/dispatcher/reentry_guard.cc",
"allocator/dispatcher/reentry_guard.h",
"allocator/dispatcher/subsystem.h",
"as_const.h",
"at_exit.cc",
"at_exit.h",
"atomic_ref_count.h",
@@ -218,17 +218,13 @@ mixed_component("base") {
"big_endian.cc",
"big_endian.h",
"bind.h",
"bind_internal.h",
"bit_cast.h",
"bits.h",
"build_time.cc",
"build_time.h",
"callback.h",
"callback_forward.h",
"callback_helpers.cc",
"callback_helpers.h",
"callback_internal.cc",
"callback_internal.h",
"callback_list.cc",
"callback_list.h",
"cancelable_callback.h",
@@ -287,6 +283,7 @@ mixed_component("base") {
"cpu_reduction_experiment.h",
"critical_closure.h",
"cxx17_backports.h",
"cxx20_is_constant_evaluated.h",
"cxx20_to_address.h",
"dcheck_is_on.h",
"debug/activity_analyzer.cc",
@@ -354,6 +351,14 @@ mixed_component("base") {
"files/scoped_temp_dir.cc",
"files/scoped_temp_dir.h",
"format_macros.h",
"functional/bind.h",
"functional/bind_internal.h",
"functional/callback.h",
"functional/callback_forward.h",
"functional/callback_helpers.cc",
"functional/callback_helpers.h",
"functional/callback_internal.cc",
"functional/callback_internal.h",
"functional/function_ref.h",
"functional/identity.h",
"functional/invoke.h",
@@ -540,12 +545,18 @@ mixed_component("base") {
"pending_task.h",
"pickle.cc",
"pickle.h",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/moving_average.cc",
"power_monitor/moving_average.h",
"power_monitor/power_monitor.cc",
"power_monitor/power_monitor.h",
"power_monitor/power_monitor_device_source.cc",
"power_monitor/power_monitor_device_source.h",
"power_monitor/power_monitor_features.cc",
"power_monitor/power_monitor_features.h",
"power_monitor/power_monitor_source.cc",
"power_monitor/power_monitor_source.h",
"power_monitor/power_observer.h",
@@ -785,6 +796,7 @@ mixed_component("base") {
"task/simple_task_executor.h",
"task/single_thread_task_executor.cc",
"task/single_thread_task_executor.h",
"task/single_thread_task_runner.cc",
"task/single_thread_task_runner.h",
"task/single_thread_task_runner_thread_mode.h",
"task/task_executor.cc",
@@ -950,6 +962,7 @@ mixed_component("base") {
"types/expected.h",
"types/expected_internal.h",
"types/id_type.h",
"types/optional_ref.h",
"types/optional_util.h",
"types/pass_key.h",
"types/strong_alias.h",
@@ -1070,11 +1083,7 @@ mixed_component("base") {
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
"native_library_win.cc",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_win.cc",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/power_monitor_device_source_win.cc",
"power_monitor/speed_limit_observer_win.cc",
"power_monitor/speed_limit_observer_win.h",
@@ -1300,11 +1309,7 @@ mixed_component("base") {
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
"native_library_mac.mm",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_mac.mm",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/battery_state_sampler_mac.cc",
"power_monitor/iopm_power_source_sampling_event_source.cc",
"power_monitor/iopm_power_source_sampling_event_source.h",
@@ -1544,9 +1549,6 @@ mixed_component("base") {
"//third_party/abseil-cpp:absl",
]
# Windows cannot use the nodebug assertion handler because it doesn't support
# weak symbols, which are required to override the default libc++
# implementation.
if (use_custom_libcxx && !is_debug) {
public_deps += [ ":nodebug_assertion" ]
}
@@ -2549,7 +2551,10 @@ buildflag_header("sanitizer_buildflags") {
header = "sanitizer_buildflags.h"
header_dir = "base"
flags = [ "IS_HWASAN=$is_hwasan" ]
flags = [
"IS_HWASAN=$is_hwasan",
"USING_SANITIZER=$using_sanitizer",
]
}
buildflag_header("tracing_buildflags") {
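Note: the hunk above adds a USING_SANITIZER flag to the generated base/sanitizer_buildflags.h header. A minimal sketch of how C++ code would consume it (the BUILDFLAG macro and the header path follow from the GN target above; the function name is illustrative):

#include "base/sanitizer_buildflags.h"

// Returns true when the build was produced with any sanitizer enabled,
// mirroring GN's using_sanitizer argument.
bool IsSanitizerBuild() {
#if BUILDFLAG(USING_SANITIZER)
  return true;
#else
  return false;
#endif
}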


@@ -34,6 +34,14 @@ per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
# Logging-related changes:
per-file check*=olivierli@chromium.org
per-file check*=pbos@chromium.org
per-file dcheck*=olivierli@chromium.org
per-file dcheck*=pbos@chromium.org
per-file logging*=olivierli@chromium.org
per-file logging*=pbos@chromium.org
# Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS


@@ -20,7 +20,7 @@ def CheckChangeLintsClean(input_api, output_api):
# The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
# Only process those extensions which are used in Chromium, in directories
# that currently lint clean.
CLEAN_CPP_FILES_ONLY = (r'base[\\/]win[\\/].*\.(cc|h)$', )
CLEAN_CPP_FILES_ONLY = (r'base/win/.*\.(cc|h)$', )
source_file_filter = lambda x: input_api.FilterSourceFile(
x,
files_to_check=CLEAN_CPP_FILES_ONLY,
@@ -90,9 +90,9 @@ def _CheckNoTraceEventInclude(input_api, output_api):
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*[\\/]test[\\/].*",
r".*[\\/]trace_event[\\/].*",
r".*[\\/]tracing[\\/].*",
r".*/test/.*",
r".*/trace_event/.*",
r".*/tracing/.*",
]
locations = _FindLocations(input_api, discouraged_includes, files_to_check,
@@ -123,9 +123,9 @@ def _WarnPbzeroIncludes(input_api, output_api):
r".*\.(h|cc|mm)$",
]
files_to_skip = [
r".*[\\/]test[\\/].*",
r".*[\\/]trace_event[\\/].*",
r".*[\\/]tracing[\\/].*",
r".*/test/.*",
r".*/trace_event/.*",
r".*/tracing/.*",
]
locations = _FindLocations(input_api, warn_includes, files_to_check,


@@ -11,8 +11,27 @@
namespace base {
namespace features {
const BASE_EXPORT Feature kPartitionAllocDanglingPtr{
"PartitionAllocDanglingPtr", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr,
"PartitionAllocUnretainedDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
kUnretainedDanglingPtrModeOption[] = {
{UnretainedDanglingPtrMode::kCrash, "crash"},
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
UnretainedDanglingPtrMode::kDumpWithoutCrashing,
&kUnretainedDanglingPtrModeOption,
};
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"},
@@ -27,49 +46,54 @@ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
const Feature kPartitionAllocPCScanBrowserOnly{
"PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly,
"PartitionAllocPCScanBrowserOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition.
const Feature kPartitionAllocPCScanRendererOnly{
"PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScanRendererOnly,
"PartitionAllocPCScanRendererOnly",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment.
const Feature kPartitionAllocBackupRefPtrControl{
"PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocBackupRefPtrControl,
"PartitionAllocBackupRefPtrControl",
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size.
const Feature kPartitionAllocLargeThreadCacheSize{
"PartitionAllocLargeThreadCacheSize",
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize,
"PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
// Not unconditionally enabled on 32 bit Android, since it is a more
// memory-constrained platform.
FEATURE_DISABLED_BY_DEFAULT
// Not unconditionally enabled on 32 bit Android, since it is a
// more memory-constrained platform.
FEATURE_DISABLED_BY_DEFAULT
#else
FEATURE_ENABLED_BY_DEFAULT
FEATURE_ENABLED_BY_DEFAULT
#endif
};
);
const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
"PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
"PartitionAllocLargeEmptySlotSpanRing",
FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
const Feature kPartitionAllocBackupRefPtr {
"PartitionAllocBackupRefPtr",
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
FEATURE_DISABLED_BY_DEFAULT
#endif
};
);
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = {
@@ -115,8 +139,9 @@ const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
// If enabled, switches the bucket distribution to an alternate one. Only one of
// these features may be enabled at a time.
const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution{
"PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocUseAlternateDistribution,
"PartitionAllocUseAlternateDistribution",
FEATURE_DISABLED_BY_DEFAULT);
const base::FeatureParam<AlternateBucketDistributionMode>::Option
kPartitionAllocAlternateDistributionOption[] = {
{AlternateBucketDistributionMode::kDefault, "default"},
@@ -131,34 +156,39 @@ const base::FeatureParam<AlternateBucketDistributionMode>
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself.
const Feature kPartitionAllocPCScanMUAwareScheduler{
"PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler,
"PartitionAllocPCScanMUAwareScheduler",
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature.
const Feature kPartitionAllocPCScanImmediateFreeing{
"PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing,
"PartitionAllocPCScanImmediateFreeing",
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free().
const Feature kPartitionAllocPCScanEagerClearing{
"PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
"PartitionAllocPCScanEagerClearing",
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator.
const Feature kPartitionAllocPCScanStackScanning {
"PartitionAllocPCScanStackScanning",
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED)
};
);
const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocDCScan,
"PartitionAllocDCScan",
FEATURE_DISABLED_BY_DEFAULT);
// Whether to sort the active slot spans in PurgeMemory().
extern const Feature kPartitionAllocSortActiveSlotSpans{
"PartitionAllocSortActiveSlotSpans", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans,
"PartitionAllocSortActiveSlotSpans",
FEATURE_DISABLED_BY_DEFAULT);
} // namespace features
} // namespace base
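Note: the hunks above migrate every feature definition from an aggregate-initialized base::Feature constant to the BASE_FEATURE() macro. A minimal before/after sketch of the pattern (BASE_FEATURE and BASE_DECLARE_FEATURE are the real base/feature_list.h macros; kMyFeature is illustrative):

#include "base/feature_list.h"

// Old pattern, removed in this diff:
//   const base::Feature kMyFeature{"MyFeature",
//                                  base::FEATURE_DISABLED_BY_DEFAULT};

// New pattern. In the header:
BASE_DECLARE_FEATURE(kMyFeature);

// In the .cc file:
BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);

// Query sites are unchanged:
//   base::FeatureList::IsEnabled(kMyFeature)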


@@ -15,12 +15,20 @@
namespace base {
namespace features {
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtr;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr.
kCrash, // (default)
@@ -38,14 +46,14 @@ extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
extern const BASE_EXPORT Feature kPartitionAllocPCScanRendererOnly;
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
extern const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtrControl);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
enum class BackupRefPtrEnabledProcesses {
@@ -87,7 +95,7 @@ enum class AlternateBucketDistributionMode : uint8_t {
kDenser,
};
extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtr;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr);
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
@@ -101,13 +109,13 @@ extern const BASE_EXPORT base::FeatureParam<AlternateBucketDistributionMode>
extern const BASE_EXPORT base::FeatureParam<AlternateBucketDistributionMode>
kPartitionAllocAlternateBucketDistributionParam;
extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
extern const BASE_EXPORT Feature kPartitionAllocDCScan;
extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;
extern const BASE_EXPORT Feature kPartitionAllocSortActiveSlotSpans;
extern const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseAlternateDistribution);
} // namespace features
} // namespace base
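Note: kUnretainedDanglingPtrModeParam above is a base::FeatureParam over an enum, so the detection mode is chosen on the command line rather than at compile time. A minimal sketch of the pattern under illustrative names (the base/ APIs themselves are real):

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

enum class ExampleMode { kCrash, kDumpWithoutCrashing };

BASE_FEATURE(kExampleFeature, "ExampleFeature",
             base::FEATURE_DISABLED_BY_DEFAULT);

// Maps command-line strings to enum values.
constexpr base::FeatureParam<ExampleMode>::Option kExampleModeOptions[] = {
    {ExampleMode::kCrash, "crash"},
    {ExampleMode::kDumpWithoutCrashing, "dump_without_crashing"},
};
const base::FeatureParam<ExampleMode> kExampleModeParam{
    &kExampleFeature, "mode", ExampleMode::kDumpWithoutCrashing,
    &kExampleModeOptions};

// Selected at runtime with:
//   --enable-features=ExampleFeature:mode/crash
// and read with kExampleModeParam.Get().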


@@ -5,6 +5,7 @@
#include "base/allocator/partition_alloc_support.h"
#include <array>
#include <cinttypes>
#include <cstdint>
#include <map>
#include <string>
@@ -14,6 +15,7 @@
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
@@ -22,7 +24,9 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h"
#include "base/debug/task_trace.h"
#include "base/feature_list.h"
#include "base/immediate_crash.h"
#include "base/metrics/histogram_functions.h"
@@ -482,23 +486,22 @@ void DanglingRawPtrReleasedCrash(uintptr_t id) {
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
":\n\n"
"The memory was freed at:\n%s\n"
"The dangling raw_ptr was released at:\n%s",
id, stack_trace_free->ToString().c_str(),
stack_trace_release.ToString().c_str());
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< "The memory was freed at:\n"
<< *stack_trace_free << "\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release;
} else {
LOG(ERROR) << StringPrintf(
"Detected dangling raw_ptr with id=0x%016" PRIxPTR
":\n\n"
"It was not recorded where the memory was freed.\n\n"
"The dangling raw_ptr was released at:\n%s",
id, stack_trace_release.ToString().c_str());
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release;
}
IMMEDIATE_CRASH();
}
@ -543,5 +546,40 @@ void InstallDanglingRawPtrChecks() {
void InstallDanglingRawPtrChecks() {}
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
PA_NO_CODE_FOLDING();
debug::DumpWithoutCrashing();
}
void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
debug::TaskTrace task_trace;
debug::StackTrace stack_trace;
LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< task_trace << stack_trace;
IMMEDIATE_CRASH();
}
void InstallUnretainedDanglingRawPtrChecks() {
if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
return;
}
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
switch (features::kUnretainedDanglingPtrModeParam.Get()) {
case features::UnretainedDanglingPtrMode::kCrash:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedCrash);
break;
case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
break;
}
}
} // namespace allocator
} // namespace base

View File

@ -35,6 +35,7 @@ BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
// This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
} // namespace allocator
} // namespace base

View File

@ -0,0 +1,7 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"

View File

@ -115,6 +115,7 @@ component("partition_alloc") {
"partition_alloc_base/scoped_clear_last_error.h",
"partition_alloc_base/strings/stringprintf.cc",
"partition_alloc_base/strings/stringprintf.h",
"partition_alloc_base/system/sys_info.h",
"partition_alloc_base/thread_annotations.h",
"partition_alloc_base/threading/platform_thread.cc",
"partition_alloc_base/threading/platform_thread.h",
@ -258,11 +259,23 @@ component("partition_alloc") {
sources += [
"partition_alloc_base/mac/foundation_util.h",
"partition_alloc_base/mac/foundation_util.mm",
"partition_alloc_base/mac/mac_util.h",
"partition_alloc_base/mac/mac_util.mm",
"partition_alloc_base/mac/scoped_cftyperef.h",
"partition_alloc_base/mac/scoped_typeref.h",
]
if (is_ios) {
sources += [
"partition_alloc_base/ios/ios_util.h",
"partition_alloc_base/ios/ios_util.mm",
"partition_alloc_base/system/sys_info_ios.mm",
]
}
if (is_mac) {
sources += [
"partition_alloc_base/mac/mac_util.h",
"partition_alloc_base/mac/mac_util.mm",
"partition_alloc_base/system/sys_info_mac.mm",
]
}
}
if (build_with_chromium) {
if (current_cpu == "x64") {
@ -318,8 +331,11 @@ component("partition_alloc") {
frameworks += [ "Security.framework" ]
}
if (is_component_build && is_apple) {
frameworks += [ "CoreFoundation.framework" ]
if (is_apple) {
frameworks += [
"CoreFoundation.framework",
"Foundation.framework",
]
}
configs += [ "//build/config/compiler:wexit_time_destructors" ]
@ -352,6 +368,10 @@ buildflag_header("partition_alloc_buildflags") {
_enable_gwp_asan_support = _enable_backup_ref_ptr_support
# Shadow metadata only supports Linux now.
_enable_shadow_metadata_for_64_bits_pointers =
enable_shadow_metadata && is_linux
# The only BRP mode that GWP-ASan supports is the "previous slot" mode.
# This is because it requires out-of-line ref count storage for system
# page aligned allocations.
@ -377,10 +397,15 @@ buildflag_header("partition_alloc_buildflags") {
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$_enable_shadow_metadata_for_64_bits_pointers",
# *Scan is currently only used by Chromium.
"STARSCAN=$build_with_chromium",
# We can only use `//base/tracing` when building in Chromium.
"PA_USE_BASE_TRACING=$build_with_chromium",
"ENABLE_PKEYS=$enable_pkeys",
]
}

View File

@ -5,6 +5,51 @@
# PartitionAlloc is planned to be extracted into a standalone library, and
# therefore dependencies need to be strictly controlled and minimized.
# Only these hosts are allowed for dependencies in this DEPS file.
# This is a subset of chromium/src/DEPS's allowed_hosts.
allowed_hosts = [
'chromium.googlesource.com',
]
vars = {
'chromium_git': 'https://chromium.googlesource.com',
}
deps = {
'partition_allocator/buildtools/clang_format/script':
Var('chromium_git') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git',
'partition_allocator/buildtools/linux64': {
'packages': [
{
'package': 'gn/gn/linux-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "linux"',
},
'partition_allocator/buildtools/mac': {
'packages': [
{
'package': 'gn/gn/mac-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "mac"',
},
'partition_allocator/buildtools/win': {
'packages': [
{
'package': 'gn/gn/windows-amd64',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "win"',
},
}
noparent = True
include_rules = [

View File

@ -12,6 +12,10 @@ details. For implementation details, see the comments in
* [Build Config](./build_config.md): Pertinent GN args, buildflags, and
macros.
* [Chrome-External Builds](./external_builds.md): Further considerations
for standalone PartitionAlloc, plus an embedder's guide for some extra
GN args.
## Overview
PartitionAlloc is a memory allocator optimized for space efficiency,

View File

@ -47,18 +47,14 @@ void DecommitPages(uintptr_t address, size_t size) {
} // namespace
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
PA_DCHECK(!(ptr & kSuperPageOffsetMask));
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
PA_CHECK(handle > 0 && handle <= std::size(pools_));
for (pool_handle i = 0; i < std::size(pools_); ++i) {
if (!pools_[i].IsInitialized()) {
pools_[i].Initialize(ptr, length);
return i + 1;
}
}
PA_NOTREACHED();
return 0;
Pool* pool = GetPool(handle);
PA_CHECK(!pool->IsInitialized());
pool->Initialize(ptr, length);
}
void AddressPoolManager::GetPoolUsedSuperPages(
@ -289,12 +285,12 @@ void AddressPoolManager::GetPoolStats(const pool_handle handle,
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 64-bit pool stats.
GetPoolStats(GetRegularPool(), &stats->regular_pool_stats);
GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
GetPoolStats(GetBRPPool(), &stats->brp_pool_stats);
GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (IsConfigurablePoolAvailable()) {
GetPoolStats(GetConfigurablePool(), &stats->configurable_pool_stats);
GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
}
return true;
}

View File

@ -54,7 +54,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if defined(PA_HAS_64_BITS_POINTERS)
pool_handle Add(uintptr_t address, size_t length);
void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle);
// Populate a |used| bitset of superpages currently in use.
@ -65,12 +65,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif
// Reserves address space from GigaCage.
// Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length);
// Frees address space back to GigaCage and decommits underlying system pages.
// Frees address space back to the pool and decommits underlying system pages.
void UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length);
@ -158,22 +158,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
#endif // defined(PA_HAS_64_BITS_POINTERS)
static AddressPoolManager singleton_;
static PA_CONSTINIT AddressPoolManager singleton_;
};
PA_ALWAYS_INLINE pool_handle GetRegularPool() {
return kRegularPoolHandle;
}
PA_ALWAYS_INLINE pool_handle GetBRPPool() {
return kBRPPoolHandle;
}
PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
PA_DCHECK(IsConfigurablePoolAvailable());
return kConfigurablePoolHandle;
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

View File

@ -111,17 +111,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
// The only potentially dangerous scenario, in which this check is used, is
// when the assignment of the first raw_ptr<T> object for a non-GigaCage
// address is racing with the allocation of a new GigCage super-page at the
// same address. We assume that if raw_ptr<T> is being initialized with a
// raw pointer, the associated allocation is "alive"; otherwise, the issue
// should be fixed by rewriting the raw pointer variable as raw_ptr<T>.
// In the worst case, when such a fix is impossible, we should just undo the
// raw pointer -> raw_ptr<T> rewrite of the problematic field. If the
// above assumption holds, the existing allocation will prevent us from
// reserving the super-page region and, thus, having the race condition.
// Since we rely on that external synchronization, the relaxed memory
// ordering should be sufficient.
// when the assignment of the first raw_ptr<T> object for an address
// allocated outside the BRP pool is racing with the allocation of a new
// super page at the same address. We assume that if raw_ptr<T> is being
// initialized with a raw pointer, the associated allocation is "alive";
// otherwise, the issue should be fixed by rewriting the raw pointer
// variable as raw_ptr<T>. In the worst case, when such a fix is
// impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of
// the problematic field. If the above assumption holds, the existing
// allocation will prevent us from reserving the super-page region and,
// thus, having the race condition. Since we rely on that external
// synchronization, the relaxed memory ordering should be sufficient.
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
std::memory_order_relaxed);
}

View File

@ -17,7 +17,7 @@ namespace partition_alloc {
struct PoolStats {
size_t usage = 0;
// On 32-bit, GigaCage is mainly a logical entity, intermingled with
// On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case.
#if defined(PA_HAS_64_BITS_POINTERS)

View File

@ -12,6 +12,9 @@ namespace partition_alloc {
namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
[](uintptr_t) {};
bool g_unretained_dangling_raw_ptr_check_enabled = false;
} // namespace
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
@ -34,6 +37,21 @@ void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
g_dangling_raw_ptr_released_fn = fn;
}
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
return g_unretained_dangling_raw_ptr_detected_fn;
}
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
PA_DCHECK(fn);
g_unretained_dangling_raw_ptr_detected_fn = fn;
}
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
bool old = g_unretained_dangling_raw_ptr_check_enabled;
g_unretained_dangling_raw_ptr_check_enabled = enabled;
return old;
}
namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
@ -43,5 +61,15 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
g_dangling_raw_ptr_released_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id) {
g_unretained_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled() {
return g_unretained_dangling_raw_ptr_check_enabled;
}
} // namespace internal
} // namespace partition_alloc
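// Illustrative sketch (not part of the original file): an embedder can swap
// in its own report handler through the hooks defined above. The handler
// body and function name below are placeholders.
//
//   #include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
//
//   void InstallCustomUnretainedHandler() {
//     partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
//         [](uintptr_t id) { /* log `id`, crash, or record a metric */ });
//     // Returns the previous setting, so callers can save and restore it.
//     bool was_enabled =
//         partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(true);
//     (void)was_enabled;
//   }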

View File

@ -35,6 +35,13 @@ DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);
// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region.
//
@ -49,6 +56,10 @@ namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled();
} // namespace internal
} // namespace partition_alloc

View File

@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/partition_allocator/thread_cache.h"

View File

@ -25,6 +25,18 @@ PartitionAlloc's build will expect them at
In addition, something must provide `build_with_chromium = false` to
the PA build system.
## Periodic Memory Reduction Routines
PartitionAlloc provides APIs to
* reclaim memory (see [memory\_reclaimer.h](./memory_reclaimer.h)) and
* purge thread caches (see [thread\_cache.h](./thread_cache.h)).
Both of these must be called by the embedder, from code external to PartitionAlloc.
PA provides neither an event loop nor timers of its own, delegating this
to its clients.
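As a sketch only, an embedder might drive both from a timer on its own event
loop. The entry points below are taken from the two headers cited above;
treat the exact names as assumptions to verify against the current headers.

    #include "base/allocator/partition_allocator/memory_reclaimer.h"
    #include "base/allocator/partition_allocator/thread_cache.h"

    // Fired periodically (e.g. every few seconds) by the embedder's timer.
    void OnReclaimTimerFired() {
      ::partition_alloc::MemoryReclaimer::Instance()->ReclaimPeriodically();
      ::partition_alloc::ThreadCacheRegistry::Instance().PurgeAll();
    }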
## Build Considerations
External clients create constraints on PartitionAlloc's implementation.
@ -54,4 +66,4 @@ replaced with intrinsics in the presence of `COMPILER_MSVC` (absent
`__clang__`).
[standalone-PA-repo]: https://chromium.googlesource.com/chromium/src/base/allocator/partition_allocator.git
[msvc-inline-assembly]: https://docs.microsoft.com/en-us/cpp/assembler/inline/inline-assembler?view=msvc-170
[msvc-inline-assembly]: https://docs.microsoft.com/en-us/cpp/assembler/inline/inline-assembler?view=msvc-170

View File

@ -89,18 +89,24 @@ Buckets consist of slot spans, organized as linked lists (see below).
holds some not-too-large memory chunks, ready to be allocated. This
speeds up in-thread allocation by reducing a lock hold to a
thread-local storage lookup, improving cache locality.
* **GigaCage**: A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which all allocations are
taken. The motivation for GigaCage is for code to be able to examine
a pointer and to immediately determine whether or not the memory was
allocated by PartitionAlloc. This provides support for a number of
features, including
[StarScan][starscan-readme] and
[BackupRefPtr][brp-doc].
* Note that GigaCage only exists in builds with 64-bit pointers.
* In builds with 32-bit pointers, PartitionAlloc tracks pointers
it dispenses with a bitmap. This is often referred to as "fake
GigaCage" (or simply "GigaCage") for lack of a better term.
* **Pool**: A large (and contiguous on 64-bit) virtual address region, housing
super pages, etc. from which PartitionAlloc services allocations. The
primary purpose of the pools is to provide a fast answer to the
question, "Did PartitionAlloc allocate the memory for this pointer
from this pool?" with a single bit-masking operation (see the sketch below).
* The regular pool is a general purpose pool that contains allocations that
aren't protected by BackupRefPtr.
* The BRP pool contains all allocations protected by BackupRefPtr.
* [64-bit only] The configurable pool is named generically, because its
primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime,
providing a pre-existing mapping. Its allocations aren't protected by
BackupRefPtr.
*** promo
Pools are downgraded into a logical concept in 32-bit environments,
tracking a non-contiguous set of allocations using a bitmap.
***
* **Payload**: The usable area of a super page in which slot spans
reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of
@ -124,6 +130,19 @@ By "slow" we may mean something as simple as extra logic (`if`
statements etc.), or something as costly as system calls.
***
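A minimal sketch of the pool-membership test mentioned in the **Pool** entry
above, assuming (as stated there) a power-of-two pool size and a base address
aligned to it:

    #include <cstdint>

    constexpr uintptr_t kPoolSize = uintptr_t{1} << 34;  // e.g. 16 GiB
    constexpr uintptr_t kPoolBaseMask = ~(kPoolSize - 1);
    uintptr_t pool_base_address;  // fixed once, when the pool is reserved

    // "Did PartitionAlloc allocate this pointer from this pool?" in a
    // single AND plus a compare.
    bool IsInPool(uintptr_t address) {
      return (address & kPoolBaseMask) == pool_base_address;
    }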
## Legacy Terms
These terms are (mostly) deprecated and should not be used. They are
surfaced here to provide a ready reference for readers coming from
older design documents or documentation.
* **GigaCage**: A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which nearly all allocations
are taken. _Pools_ have overtaken GigaCage in conceptual importance,
and so there is less need today to refer to "GigaCage" or the
"cage." This is especially true given the V8 Sandbox and the
configurable pool (see above).
## PartitionAlloc-Everywhere
Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
@ -153,5 +172,4 @@ As of 2022, PartitionAlloc-Everywhere is supported on
[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
[starscan-readme]: https://chromium.googlesource.com/chromium/src/+/main/base/allocator/partition_allocator/starscan/README.md
[brp-doc]: https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/preview
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#

View File

@ -19,10 +19,8 @@ namespace partition_alloc {
// Posts and handles memory reclaim tasks for PartitionAlloc.
//
// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// called from any thread, concurrently with reclaim. Reclaim itself runs in the
// context of the provided |SequencedTaskRunner|, meaning that the caller must
// take care of this runner being compatible with the various partitions.
// PartitionAlloc users are responsible for scheduling and calling the
// reclamation methods with their own timers / event loops.
//
// Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful.

View File

@ -50,17 +50,19 @@ uintptr_t AllocPagesIncludingReserved(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1) {
uintptr_t ret =
internal::SystemAllocPages(address, length, accessibility, page_tag);
internal::SystemAllocPages(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
if (!ret) {
const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address
// space and try once more.
ReleaseReservation();
ret =
internal::SystemAllocPages(address, length, accessibility, page_tag);
ret = internal::SystemAllocPages(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
}
}
return ret;
@ -127,11 +129,12 @@ namespace internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret =
internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
uintptr_t ret = internal::SystemAllocPagesInternal(
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
@ -143,9 +146,10 @@ uintptr_t SystemAllocPages(uintptr_t hint,
uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
page_tag);
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
return AllocPagesWithAlignOffset(0, length, align, 0, accessibility, page_tag,
file_descriptor_for_shared_alloc);
}
uintptr_t AllocPages(uintptr_t address,
size_t length,
@ -171,7 +175,8 @@ uintptr_t AllocPagesWithAlignOffset(
size_t align,
size_t align_offset,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
PA_DCHECK(length >= internal::PageAllocationGranularity());
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(align >= internal::PageAllocationGranularity());
@ -201,7 +206,8 @@ uintptr_t AllocPagesWithAlignOffset(
for (int i = 0; i < kExactSizeTries; ++i) {
uintptr_t ret =
AllocPagesIncludingReserved(address, length, accessibility, page_tag);
AllocPagesIncludingReserved(address, length, accessibility, page_tag,
file_descriptor_for_shared_alloc);
if (ret) {
// If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset)
@ -234,8 +240,9 @@ uintptr_t AllocPagesWithAlignOffset(
do {
// Continue randomizing only on POSIX.
address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
ret = AllocPagesIncludingReserved(address, try_length, accessibility,
page_tag);
ret =
AllocPagesIncludingReserved(address, try_length, accessibility,
page_tag, file_descriptor_for_shared_alloc);
// The retries are for Windows, where a race can steal our mapping on
// resize.
} while (ret && (ret = TrimMapping(ret, try_length, length, align,
@ -309,7 +316,8 @@ void RecommitSystemPages(
PageAccessibilityDisposition accessibility_disposition) {
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
PA_DCHECK(accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible);
internal::RecommitSystemPagesInternal(address, length, accessibility,
accessibility_disposition);
}
@ -323,7 +331,8 @@ bool TryRecommitSystemPages(
// crashing case.
PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
PA_DCHECK(accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible);
return internal::TryRecommitSystemPagesInternal(
address, length, accessibility, accessibility_disposition);
}

View File

@ -11,24 +11,43 @@
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "build/build_config.h"
namespace partition_alloc {
enum class PageAccessibilityConfiguration {
kInaccessible,
kRead,
kReadWrite,
// This flag is mapped to kReadWrite on systems that
// don't support MTE.
kReadWriteTagged,
// This flag is mapped to kReadExecute on systems
// that don't support Arm's BTI.
kReadExecuteProtected,
kReadExecute,
// This flag is deprecated and will go away soon.
// TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
kReadWriteExecute,
struct PageAccessibilityConfiguration {
enum Permissions {
kInaccessible,
kRead,
kReadWrite,
// This flag is mapped to kReadWrite on systems that
// don't support MTE.
kReadWriteTagged,
// This flag is mapped to kReadExecute on systems
// that don't support Arm's BTI.
kReadExecuteProtected,
kReadExecute,
// This flag is deprecated and will go away soon.
// TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
kReadWriteExecute,
};
#if BUILDFLAG(ENABLE_PKEYS)
constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {}
#else
constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS)
Permissions permissions;
#if BUILDFLAG(ENABLE_PKEYS)
// Tag the page with a Memory Protection Key. Use 0 for none.
int pkey;
#endif // BUILDFLAG(ENABLE_PKEYS)
};
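// Illustrative only (not part of the original header): call sites construct
// the wrapper and then inspect `.permissions`. `MakeExampleConfig` is a
// made-up helper showing both constructor forms.
inline PageAccessibilityConfiguration MakeExampleConfig(bool writable) {
  PageAccessibilityConfiguration::Permissions permissions =
      writable ? PageAccessibilityConfiguration::kReadWrite
               : PageAccessibilityConfiguration::kRead;
#if BUILDFLAG(ENABLE_PKEYS)
  // 0 means "no protection key", per the field comment above.
  return PageAccessibilityConfiguration(permissions, /*pkey=*/0);
#else
  return PageAccessibilityConfiguration(permissions);
#endif
}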
// Use for De/RecommitSystemPages API.
@ -78,12 +97,17 @@ uintptr_t NextAlignedWithOffset(uintptr_t ptr,
// |page_tag| is used on some platforms to identify the source of the
// allocation. Use PageTag::kChromium as a catch-all category.
//
// |file_descriptor_for_shared_alloc| is only used in mapping the shadow
// pools to the same physical memory as the real ones in
// PartitionAddressSpace::Init(). It should be ignored in other cases.
//
// This call will return 0/nullptr if the allocation cannot be satisfied.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(size_t length,
size_t align,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
uintptr_t AllocPages(uintptr_t address,
size_t length,
@ -103,7 +127,8 @@ uintptr_t AllocPagesWithAlignOffset(
size_t align,
size_t align_offset,
PageAccessibilityConfiguration page_accessibility,
PageTag page_tag);
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
// Frees one or more pages starting at |address| and continuing for |length|
// bytes.

View File

@ -77,7 +77,7 @@ PageAllocationGranularityShift() {
// compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
// here.
return 16; // 64kB
#elif defined(_MIPS_ARCH_LOONGSON)
#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
return 14; // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);

View File

@ -15,7 +15,8 @@ namespace partition_alloc::internal {
uintptr_t SystemAllocPages(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag);
PageTag page_tag,
int file_descriptor_for_shared_alloc = -1);
} // namespace partition_alloc::internal

View File

@ -45,7 +45,7 @@ const char* PageTagToName(PageTag tag) {
zx_vm_option_t PageAccessibilityToZxVmOptions(
PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return ZX_VM_PERM_READ;
case PageAccessibilityConfiguration::kReadWrite:
@ -72,10 +72,12 @@ constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{0};
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
uintptr_t SystemAllocPagesInternal(
uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
[[maybe_unused]] int file_descriptor_for_shared_alloc) {
zx::vmo vmo;
zx_status_t status = zx::vmo::create(length, 0, &vmo);
if (status != ZX_OK) {

View File

@ -16,7 +16,7 @@
namespace partition_alloc::internal {
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return PROT_READ;
case PageAccessibilityConfiguration::kReadWriteTagged:

View File

@ -21,7 +21,13 @@
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
#if BUILDFLAG(IS_IOS)
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
#elif BUILDFLAG(IS_MAC)
#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
#else
#error "Unknown platform"
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h"
#include <Availability.h>
@ -142,15 +148,18 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility);
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
PageTag page_tag,
int file_descriptor_for_shared_alloc) {
#if BUILDFLAG(IS_APPLE)
// Use a custom tag to make it easier to distinguish Partition Alloc regions
// in vmmap(1). Tags between 240-255 are supported.
PA_DCHECK(PageTag::kFirst <= page_tag);
PA_DCHECK(PageTag::kLast >= page_tag);
int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
int fd = file_descriptor_for_shared_alloc == -1
? VM_MAKE_TAG(static_cast<int>(page_tag))
: file_descriptor_for_shared_alloc;
#else
int fd = -1;
int fd = file_descriptor_for_shared_alloc;
#endif
int access_flag = GetAccessFlags(accessibility);

View File

@ -32,7 +32,7 @@ constexpr bool kHintIsAdvisory = false;
std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
switch (accessibility) {
switch (accessibility.permissions) {
case PageAccessibilityConfiguration::kRead:
return PAGE_READONLY;
case PageAccessibilityConfiguration::kReadWrite:
@ -51,15 +51,17 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
}
}
uintptr_t SystemAllocPagesInternal(uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag) {
uintptr_t SystemAllocPagesInternal(
uintptr_t hint,
size_t length,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
[[maybe_unused]] int file_descriptor_for_shared_alloc) {
DWORD access_flag = GetAccessFlags(accessibility);
const DWORD type_flags =
(accessibility != PageAccessibilityConfiguration::kInaccessible)
? (MEM_RESERVE | MEM_COMMIT)
: MEM_RESERVE;
const DWORD type_flags = (accessibility.permissions !=
PageAccessibilityConfiguration::kInaccessible)
? (MEM_RESERVE | MEM_COMMIT)
: MEM_RESERVE;
void* ret = VirtualAlloc(reinterpret_cast<void*>(hint), length, type_flags,
access_flag);
if (ret == nullptr) {
@ -90,7 +92,8 @@ bool TrySetSystemPagesAccessInternal(
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility == PageAccessibilityConfiguration::kInaccessible)
if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible)
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
return nullptr !=
VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility));
@ -101,7 +104,8 @@ void SetSystemPagesAccessInternal(
size_t length,
PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address);
if (accessibility == PageAccessibilityConfiguration::kInaccessible) {
if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible) {
if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.

View File

@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/partition_address_space.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
@ -27,6 +28,10 @@
#include <windows.h>
#endif // BUILDFLAG(IS_WIN)
#if defined(PA_ENABLE_SHADOW_METADATA)
#include <sys/mman.h>
#endif
namespace partition_alloc::internal {
#if defined(PA_HAS_64_BITS_POINTERS)
@ -35,7 +40,7 @@ namespace {
#if BUILDFLAG(IS_WIN)
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
bool IsLegacyWindowsVersion() {
// Use ::RtlGetVersion instead of ::GetVersionEx or helpers from
// VersionHelpers.h because those alternatives change their behavior depending
@ -59,20 +64,20 @@ bool IsLegacyWindowsVersion() {
return version_info.dwMajorVersion < 6 ||
(version_info.dwMajorVersion == 6 && version_info.dwMinorVersion < 3);
}
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
PA_NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}
PA_NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}
#endif // BUILDFLAG(IS_WIN)
PA_NOINLINE void HandleGigaCageAllocFailure() {
PA_NOINLINE void HandlePoolAllocFailure() {
PA_NO_CODE_FOLDING();
uint32_t alloc_page_error_code = GetAllocPageErrorCode();
PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
@ -82,12 +87,12 @@ PA_NOINLINE void HandleGigaCageAllocFailure() {
if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
// The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
// it must be VA space exhaustion.
HandleGigaCageAllocFailureOutOfVASpace();
HandlePoolAllocFailureOutOfVASpace();
} else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
// On Windows <8.1, MEM_RESERVE increases commit charge to account for
// not-yet-committed PTEs needed to cover that VA space, if it was to be
// committed (see crbug.com/1101421#c16).
HandleGigaCageAllocFailureOutOfCommitCharge();
HandlePoolAllocFailureOutOfCommitCharge();
} else
#endif // BUILDFLAG(IS_WIN)
{
@ -98,9 +103,14 @@ PA_NOINLINE void HandleGigaCageAllocFailure() {
} // namespace
alignas(kPartitionCachelineSize)
PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_;
PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(IS_IOS)
namespace {
bool IsIOSTestProcess() {
@ -151,25 +161,30 @@ PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return IsLegacyWindowsVersion() ? kBRPPoolSizeForLegacyWindows : kBRPPoolSize;
}
#endif // BUILDFLAG(IS_IOS)
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
void PartitionAddressSpace::Init() {
if (IsInitialized())
return;
size_t regular_pool_size = RegularPoolSize();
setup_.regular_pool_base_address_ = AllocPages(
regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
#if defined(PA_ENABLE_SHADOW_METADATA)
int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
#else
int regular_pool_fd = -1;
#endif
setup_.regular_pool_base_address_ =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc, regular_pool_fd);
if (!setup_.regular_pool_base_address_)
HandleGigaCageAllocFailure();
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
HandlePoolAllocFailure();
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
#endif
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
setup_.regular_pool_ = AddressPoolManager::GetInstance().Add(
setup_.regular_pool_base_address_, regular_pool_size);
PA_CHECK(setup_.regular_pool_ == kRegularPoolHandle);
AddressPoolManager::GetInstance().Add(
kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
@ -178,6 +193,11 @@ void PartitionAddressSpace::Init() {
!IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
size_t brp_pool_size = BRPPoolSize();
#if defined(PA_ENABLE_SHADOW_METADATA)
int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
#else
int brp_pool_fd = -1;
#endif
// Reserve an extra allocation granularity unit before the BRP pool, but keep
// the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
// is a valid pointer, and having a "forbidden zone" before the BRP pool
@ -186,17 +206,17 @@ void PartitionAddressSpace::Init() {
uintptr_t base_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
brp_pool_fd);
if (!base_address)
HandleGigaCageAllocFailure();
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#endif
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
setup_.brp_pool_ = AddressPoolManager::GetInstance().Add(
setup_.brp_pool_base_address_, brp_pool_size);
PA_CHECK(setup_.brp_pool_ == kBRPPoolHandle);
AddressPoolManager::GetInstance().Add(
kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
@ -206,11 +226,29 @@ void PartitionAddressSpace::Init() {
// Reserve memory for PCScan quarantine card table.
uintptr_t requested_address = setup_.regular_pool_base_address_;
uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
setup_.regular_pool_, requested_address, kSuperPageSize);
kRegularPoolHandle, requested_address, kSuperPageSize);
PA_CHECK(requested_address == actual_address)
<< "QuarantineCardTable is required to be allocated at the beginning of "
"the regular pool";
#endif // PA_STARSCAN_USE_CARD_TABLE
#if defined(PA_ENABLE_SHADOW_METADATA)
// Reserve memory for the shadow pools.
uintptr_t regular_pool_shadow_address =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc, regular_pool_fd);
regular_pool_shadow_offset_ =
regular_pool_shadow_address - setup_.regular_pool_base_address_;
uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
brp_pool_fd);
brp_pool_shadow_offset_ =
brp_pool_shadow_address - setup_.brp_pool_base_address_;
#endif
}
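// Why Init() passes the same memfd to both a pool and its shadow (sketch,
// not part of the original file): two MAP_SHARED mappings of one memfd are
// backed by the same physical pages, so `address + *_shadow_offset_` aliases
// the pool's memory while remaining independently protectable. Standalone
// Linux-only illustration (needs <sys/mman.h>, <unistd.h>, <cstddef>,
// <cassert>):
//
//   int fd = memfd_create("/demo_pool", MFD_CLOEXEC);
//   ftruncate(fd, 4096);
//   char* real = static_cast<char*>(mmap(
//       nullptr, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
//   char* shadow = static_cast<char*>(mmap(
//       nullptr, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
//   std::ptrdiff_t offset = shadow - real;  // cf. regular_pool_shadow_offset_
//   real[0] = 42;
//   assert(shadow[0] == 42);  // same physical page, two virtual addresses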
void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
@ -230,9 +268,8 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
setup_.configurable_pool_base_address_ = pool_base;
setup_.configurable_pool_base_mask_ = ~(size - 1);
setup_.configurable_pool_ = AddressPoolManager::GetInstance().Add(
setup_.configurable_pool_base_address_, size);
PA_CHECK(setup_.configurable_pool_ == kConfigurablePoolHandle);
AddressPoolManager::GetInstance().Add(
kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
}
void PartitionAddressSpace::UninitForTesting() {
@ -248,17 +285,13 @@ void PartitionAddressSpace::UninitForTesting() {
setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
setup_.regular_pool_ = 0;
setup_.brp_pool_ = 0;
setup_.configurable_pool_ = 0;
AddressPoolManager::GetInstance().ResetForTesting();
}
void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
AddressPoolManager::GetInstance().Remove(setup_.configurable_pool_);
AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
setup_.configurable_pool_ = 0;
}
#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

View File

@ -7,6 +7,7 @@
#include <algorithm>
#include <array>
#include <cstddef>
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
@ -30,16 +31,11 @@ namespace partition_alloc {
namespace internal {
// Reserves address space for PartitionAllocator.
// Manages PartitionAlloc address space, which is split into pools.
// See `glossary.md`.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
public:
// BRP stands for BackupRefPtr. GigaCage is split into pools, one which
// supports BackupRefPtr and one that doesn't.
static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() {
return setup_.regular_pool_;
}
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_;
}
@ -49,16 +45,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
}
#endif
static PA_ALWAYS_INLINE internal::pool_handle GetBRPPool() {
return setup_.brp_pool_;
}
// The Configurable Pool can be created inside an existing mapping and so will
// be located outside PartitionAlloc's GigaCage.
static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
return setup_.configurable_pool_;
}
static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
@ -68,15 +54,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
pool_handle pool = 0;
uintptr_t base = 0;
if (IsInRegularPool(address)) {
pool = GetRegularPool();
pool = kRegularPoolHandle;
base = setup_.regular_pool_base_address_;
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} else if (IsInBRPPool(address)) {
pool = GetBRPPool();
pool = kBRPPoolHandle;
base = setup_.brp_pool_base_address_;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} else if (IsInConfigurablePool(address)) {
pool = GetConfigurablePool();
PA_DCHECK(IsConfigurablePoolInitialized());
pool = kConfigurablePoolHandle;
base = setup_.configurable_pool_base_address_;
} else {
PA_NOTREACHED();
@ -90,13 +77,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return kConfigurablePoolMinSize;
}
// Initialize the GigaCage and the Pools inside of it.
// Initialize pools (except for the configurable one).
//
// This function must only be called from the main thread.
static void Init();
// Initialize the ConfigurablePool at the given address |pool_base|. It must
// be aligned to the size of the pool. The size must be a power of two and
// must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()]. This
// function must only be called from the main thread.
// must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
//
// This function must only be called from the main thread.
static void InitConfigurablePool(uintptr_t pool_base, size_t size);
static void UninitForTesting();
static void UninitConfigurablePoolForTesting();
@ -104,12 +93,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static PA_ALWAYS_INLINE bool IsInitialized() {
// The regular and BRP pools are initialized together: either both are, or
// neither is. The configurable pool is initialized separately.
if (setup_.regular_pool_) {
PA_DCHECK(setup_.brp_pool_ != 0);
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
return true;
}
PA_DCHECK(setup_.brp_pool_ == 0);
PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
return false;
}
@ -120,7 +109,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
@ -135,13 +124,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
}
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
}
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
return (address & setup_.configurable_pool_base_mask_) ==
@ -152,10 +147,21 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return setup_.configurable_pool_base_address_;
}
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
#if defined(PA_ENABLE_SHADOW_METADATA)
static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
if (pool == kRegularPoolHandle) {
return regular_pool_shadow_offset_;
} else if (pool == kBRPPoolHandle) {
return brp_pool_shadow_offset_;
} else {
// TODO(crbug.com/1362969): Add shadow for configurable pool as well.
// Shadow is not created for ConfigurablePool for now, so this part should
// be unreachable.
PA_NOTREACHED();
return 0;
}
}
#endif
// PartitionAddressSpace is static_only class.
PartitionAddressSpace() = delete;
@ -164,7 +170,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
void* operator new(size_t, void*) = delete;
private:
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static PA_ALWAYS_INLINE size_t RegularPoolSize();
static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else
@ -175,32 +181,32 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
return kBRPPoolSize;
}
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool, is
// where all allocations have a BRP ref-count, thus pointers pointing there
// can use a BRP protection against UaF. Allocations in the other pools don't
// have that.
// On 64-bit systems, PA allocates from several contiguous, mutually disjoint
// pools. The BRP pool is where all allocations have a BRP ref-count, thus
// pointers pointing there can use a BRP protection against UaF. Allocations
// in the other pools don't have that.
//
// Pool sizes have to be the power of two. Each pool will be aligned at its
// own size boundary.
//
// NOTE! The BRP pool must be preceded by a reserved region, where allocations
// are forbidden. This is to prevent a pointer immediately past a non-GigaCage
// allocation from falling into the BRP pool, thus triggering BRP mechanism
// and likely crashing. This "forbidden zone" can be as small as 1B, but it's
// simpler to just reserve an allocation granularity unit.
// NOTE! The BRP pool must be preceded by an inaccessible region. This is to
// prevent a pointer to the end of a non-BRP-pool allocation from falling into
// the BRP pool, thus triggering BRP mechanism and likely crashing. This
// "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
// allocation granularity unit.
//
// The ConfigurablePool is an optional Pool that can be created inside an
// existing mapping by the embedder, and so will be outside of the GigaCage.
// This Pool can be used when certain PA allocations must be located inside a
// given virtual address region. One use case for this Pool is V8's virtual
// memory cage, which requires that ArrayBuffers be located inside of it.
// existing mapping provided by the embedder. This Pool can be used when
// certain PA allocations must be located inside a given virtual address
// region. One use case for this Pool is the V8 Sandbox, which requires
// that ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
base::bits::IsPowerOfTwo(kBRPPoolSize));
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
// crbug.com/1101421 and crbug.com/1217759).
static constexpr size_t kRegularPoolSizeForLegacyWindows = 4 * kGiB;
@ -209,7 +215,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows) &&
base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
@ -218,7 +224,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if BUILDFLAG(IS_IOS)
#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#error iOS is only supported with a dynamically sized GigaCage.
#endif
@ -233,7 +239,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif // BUILDFLAG(IS_IOS)
#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// Masks used to easily determine whether an address belongs to a pool.
static constexpr uintptr_t kRegularPoolOffsetMask =
static_cast<uintptr_t>(kRegularPoolSize) - 1;
@ -241,29 +247,26 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr uintptr_t kBRPPoolOffsetMask =
static_cast<uintptr_t>(kBRPPoolSize) - 1;
static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#endif // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// This must be set to such a value that IsIn*Pool() always returns false when
// the pool isn't initialized.
static constexpr uintptr_t kUninitializedPoolBaseAddress =
static_cast<uintptr_t>(-1);
struct GigaCageSetup {
struct PoolSetup {
// Before PartitionAddressSpace::Init(), no allocations are made from a
// reserved address space. Therefore, set *_pool_base_address_ initially to
// -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
constexpr GigaCageSetup()
constexpr PoolSetup()
: regular_pool_base_address_(kUninitializedPoolBaseAddress),
brp_pool_base_address_(kUninitializedPoolBaseAddress),
configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
regular_pool_base_mask_(0),
brp_pool_base_mask_(0),
#endif
configurable_pool_base_mask_(0),
regular_pool_(0),
brp_pool_(0),
configurable_pool_(0) {
configurable_pool_base_mask_(0) {
}
// Using a union to enforce padding.
@ -272,29 +275,30 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
uintptr_t regular_pool_base_address_;
uintptr_t brp_pool_base_address_;
uintptr_t configurable_pool_base_address_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t regular_pool_base_mask_;
uintptr_t brp_pool_base_mask_;
#endif
uintptr_t configurable_pool_base_mask_;
pool_handle regular_pool_;
pool_handle brp_pool_;
pool_handle configurable_pool_;
};
char one_cacheline_[kPartitionCachelineSize];
};
};
static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
"GigaCageSetup has to fill a cacheline(s)");
static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
"PoolSetup has to fill a cacheline(s)");
// See the comment describing the address layout above.
//
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writeable data, through
// alignment and padding.
alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
alignas(kPartitionCachelineSize) static PoolSetup setup_;
#if defined(PA_ENABLE_SHADOW_METADATA)
static std::ptrdiff_t regular_pool_shadow_offset_;
static std::ptrdiff_t brp_pool_shadow_offset_;
#endif
};
PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
@ -310,6 +314,12 @@ PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
return PartitionAddressSpace::OffsetInBRPPool(address);
}
#if defined(PA_ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
return PartitionAddressSpace::ShadowPoolOffset(pool);
}
#endif
} // namespace internal
// Returns false for nullptr.

View File

@ -49,6 +49,7 @@ PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
#pragma optimize("", on)
#endif
#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
// Used to memset() memory for debugging purposes only.
PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// Only set the first 512kiB of the allocation. This is enough to detect uses
@ -58,6 +59,7 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}
#endif // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
// Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot

View File

@ -84,3 +84,9 @@ assert(enable_backup_ref_ptr_support || !enable_backup_ref_ptr_slow_checks,
assert(
enable_backup_ref_ptr_support || !enable_dangling_raw_ptr_checks,
"Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")
declare_args() {
enable_pkeys = is_linux && target_cpu == "x64"
}
assert(!enable_pkeys || (is_linux && target_cpu == "x64"),
"Pkeys are only supported on x64 linux")

View File

@ -0,0 +1,36 @@
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc::internal::base::ios {
// Returns whether the operating system is iOS 12 or later.
// TODO(crbug.com/1129482): Remove once minimum supported version is at least 12
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS12OrLater();
// Returns whether the operating system is iOS 13 or later.
// TODO(crbug.com/1129483): Remove once minimum supported version is at least 13
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS13OrLater();
// Returns whether the operating system is iOS 14 or later.
// TODO(crbug.com/1129484): Remove once minimum supported version is at least 14
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS14OrLater();
// Returns whether the operating system is iOS 15 or later.
// TODO(crbug.com/1227419): Remove once minimum supported version is at least 15
PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS15OrLater();
// Returns whether the operating system is at the given version or later.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix);
} // namespace partition_alloc::internal::base::ios
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_

View File

@ -0,0 +1,57 @@
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
#include <array>
#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
namespace partition_alloc::internal::base::ios {
bool IsRunningOnIOS12OrLater() {
static const bool is_running_on_or_later = IsRunningOnOrLater(12, 0, 0);
return is_running_on_or_later;
}
bool IsRunningOnIOS13OrLater() {
static const bool is_running_on_or_later = IsRunningOnOrLater(13, 0, 0);
return is_running_on_or_later;
}
bool IsRunningOnIOS14OrLater() {
static const bool is_running_on_or_later = IsRunningOnOrLater(14, 0, 0);
return is_running_on_or_later;
}
bool IsRunningOnIOS15OrLater() {
static const bool is_running_on_or_later = IsRunningOnOrLater(15, 0, 0);
return is_running_on_or_later;
}
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
static const class OSVersion {
public:
OSVersion() {
SysInfo::OperatingSystemVersionNumbers(
&current_version_[0], &current_version_[1], &current_version_[2]);
}
bool IsRunningOnOrLater(int32_t version[3]) const {
for (size_t i = 0; i < std::size(current_version_); ++i) {
if (current_version_[i] != version[i])
return current_version_[i] > version[i];
}
return true;
}
private:
int32_t current_version_[3];
} kOSVersion;
int32_t version[3] = {major, minor, bug_fix};
return kOSVersion.IsRunningOnOrLater(version);
}
} // namespace partition_alloc::internal::base::ios
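
For context, a minimal sketch of how the API above might be consumed. The header and function names come from this diff; MaybeUseIOS15Feature() is illustrative:

// Sketch only: guard an iOS-15-only codepath with the API declared above.
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"

void MaybeUseIOS15Feature() {
  namespace pa_ios = partition_alloc::internal::base::ios;
  if (pa_ios::IsRunningOnIOS15OrLater()) {
    // On iOS 15.4 the comparison runs as {15, 4, 0} vs {15, 0, 0}:
    // the majors tie, then 4 > 0, so the check returns true.
  }
}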

View File

@ -67,12 +67,13 @@ int g_min_log_level = 0;
// A log message handler that gets notified of every log message we process.
LogMessageHandlerFunction g_log_message_handler = nullptr;
void WriteToFd(int fd, const char* data, size_t length) {
#if !BUILDFLAG(IS_WIN)
void WriteToStderr(const char* data, size_t length) {
size_t bytes_written = 0;
int rv;
while (bytes_written < length) {
rv = PA_HANDLE_EINTR(
write(fd, data + bytes_written, length - bytes_written));
write(STDERR_FILENO, data + bytes_written, length - bytes_written));
if (rv < 0) {
// Give up, nothing we can do now.
break;
@ -80,6 +81,22 @@ void WriteToFd(int fd, const char* data, size_t length) {
bytes_written += rv;
}
}
#else // !BUILDFLAG(IS_WIN)
void WriteToStderr(const char* data, size_t length) {
HANDLE handle = ::GetStdHandle(STD_ERROR_HANDLE);
const char* ptr = data;
const char* ptr_end = data + length;
while (ptr < ptr_end) {
DWORD bytes_written = 0;
if (!::WriteFile(handle, ptr, ptr_end - ptr, &bytes_written, nullptr) ||
bytes_written == 0) {
// Give up, nothing we can do now.
break;
}
ptr += bytes_written;
}
}
#endif // !BUILDFLAG(IS_WIN)
} // namespace
@ -245,18 +262,15 @@ ErrnoLogMessage::~ErrnoLogMessage() {
void RawLog(int level, const char* message) {
if (level >= g_min_log_level && message) {
#if !BUILDFLAG(IS_WIN)
const size_t message_len = strlen(message);
WriteToFd(STDERR_FILENO, message, message_len);
#else // !BUILDFLAG(IS_WIN)
const size_t message_len = ::lstrlenA(message);
#endif // !BUILDFLAG(IS_WIN)
WriteToStderr(message, message_len);
if (message_len > 0 && message[message_len - 1] != '\n') {
int rv;
do {
rv = PA_HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
if (rv < 0) {
// Give up, nothing we can do now.
break;
}
} while (rv != 1);
WriteToStderr("\n", 1);
}
}
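
A hedged usage sketch of the raw logging path above; the header path and namespace are assumptions based on this file's conventions, and the level value is illustrative:

// Sketch only: RawLog() writes straight to stderr without allocating,
// and appends a trailing '\n' when the message lacks one.
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"

void LogAllocatorReady() {
  partition_alloc::internal::logging::RawLog(0, "partition_alloc: ready");
}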

View File

@ -7,9 +7,8 @@
#include <AvailabilityMacros.h>
#import <CoreGraphics/CoreGraphics.h>
#include <stdint.h>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc::internal::base::mac {
@ -19,7 +18,7 @@ namespace internal {
// integer value. For example, for macOS Sierra this returns 1012, and for macOS
// Big Sur it returns 1100. Note that the accuracy returned by this function is
// as granular as the major version number of Darwin.
int MacOSVersion();
PA_COMPONENT_EXPORT(PARTITION_ALLOC) int MacOSVersion();
} // namespace internal
@ -96,6 +95,12 @@ PA_DEFINE_IS_OS_FUNCS(12, PA_TEST_DEPLOYMENT_TARGET)
PA_DEFINE_IS_OS_FUNCS(12, PA_IGNORE_DEPLOYMENT_TARGET)
#endif
#ifdef MAC_OS_VERSION_13_0
PA_DEFINE_IS_OS_FUNCS(13, PA_TEST_DEPLOYMENT_TARGET)
#else
PA_DEFINE_IS_OS_FUNCS(13, PA_IGNORE_DEPLOYMENT_TARGET)
#endif
#undef PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED
#undef PA_DEFINE_OLD_IS_OS_FUNCS
#undef PA_DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED
@ -104,6 +109,13 @@ PA_DEFINE_IS_OS_FUNCS(12, PA_IGNORE_DEPLOYMENT_TARGET)
#undef PA_TEST_DEPLOYMENT_TARGET
#undef PA_IGNORE_DEPLOYMENT_TARGET
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
inline bool IsOSLaterThan13_DontCallThis() {
return !IsAtMostOS13();
}
} // namespace partition_alloc::internal::base::mac
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_

View File

@ -10,28 +10,50 @@
#include <utility>
namespace partition_alloc::internal::base {
// A tag type used for NoDestructor to allow it to be created for a type that
// has a trivial destructor. Use for cases where the same class might have
// different implementations that vary on destructor triviality or when the
// LSan hiding properties of NoDestructor are needed.
struct AllowForTriviallyDestructibleType;
// A wrapper that makes it easy to create an object of type T with static
// storage duration that:
// - is only constructed on first access
// - never invokes the destructor
// in order to satisfy the styleguide ban on global constructors and
// destructors.
// Helper type to create a function-local static variable of type `T` when `T`
// has a non-trivial destructor. Storing a `T` in a `base::NoDestructor<T>` will
// prevent `~T()` from running, even when the variable goes out of scope.
//
// Runtime constant example:
// const std::string& GetLineSeparator() {
// // Forwards to std::string(size_t, char, const Allocator&) constructor.
// static const base::NoDestructor<std::string> s(5, '-');
// Useful when a variable has static storage duration but its type has a
// non-trivial destructor. Chromium bans global constructors and destructors:
// using a function-local static variable prevents the former, while using
// `base::NoDestructor<T>` prevents the latter.
//
// ## Caveats
//
// - Must only be used as a function-local static variable. Declaring a global
// variable of type `base::NoDestructor<T>` will still generate a global
// constructor; declaring a local or member variable will lead to memory leaks
// or other surprising and undesirable behaviour.
//
// - If the data is rarely used, consider creating it on demand rather than
// caching it for the lifetime of the program. Though `base::NoDestructor<T>`
// does not heap allocate, the compiler still reserves space in bss for
// storing `T`, which costs memory at runtime.
//
// - If `T` is trivially destructible, do not use `base::NoDestructor<T>`:
//
// const uint64_t GetUnstableSessionSeed() {
// // No need to use `base::NoDestructor<T>` as `uint64_t` is trivially
// // destructible and does not require a global destructor.
// static const uint64_t kSessionSeed = base::RandUint64();
// return kSessionSeed;
// }
//
// ## Example Usage
//
// const std::string& GetDefaultText() {
// // Required since `static const std::string` requires a global destructor.
// static const base::NoDestructor<std::string> s("Hello world!");
// return *s;
// }
//
// More complex initialization with a lambda:
// const std::string& GetSessionNonce() {
// More complex initialization using a lambda:
//
// const std::string& GetRandomNonce() {
// // `nonce` is initialized with random data the first time this function is
// // called, but its value is fixed thereafter.
// static const base::NoDestructor<std::string> nonce([] {
// std::string s(16, '\0');
// crypto::RandBytes(s.data(), s.size());
@ -40,29 +62,24 @@ struct AllowForTriviallyDestructibleType;
// return *nonce;
// }
//
// NoDestructor<T> stores the object inline, so it also avoids a pointer
// indirection and a malloc. Also note that since C++11 static local variable
// initialization is thread-safe and so is this pattern. Code should prefer to
// use NoDestructor<T> over:
// - A function scoped static T* or T& that is dynamically initialized.
// - A global base::LazyInstance<T>.
// ## Thread safety
//
// Note that since the destructor is never run, this *will* leak memory if used
// as a stack or member variable. Furthermore, a NoDestructor<T> should never
// have global scope as that may require a static initializer.
template <typename T, typename O = std::nullptr_t>
// Initialisation of function-local static variables is thread-safe since C++11.
// The standard guarantees that:
//
// - function-local static variables will be initialised the first time
// execution passes through the declaration.
//
// - if another thread's execution concurrently passes through the declaration
// in the middle of initialisation, that thread will wait for the in-progress
// initialisation to complete.
template <typename T>
class NoDestructor {
public:
static_assert(
!std::is_trivially_destructible<T>::value ||
std::is_same<O, AllowForTriviallyDestructibleType>::value,
"base::NoDestructor is not needed because the templated class has a "
"trivial destructor");
static_assert(std::is_same<O, AllowForTriviallyDestructibleType>::value ||
std::is_same<O, std::nullptr_t>::value,
"AllowForTriviallyDestructibleType is the only valid option "
"for the second template parameter of NoDestructor");
!std::is_trivially_destructible_v<T>,
"T is trivially destructible; please use a function-local static "
"of type T directly instead");
// Not constexpr; just write static constexpr T x = ...; if the value should
// be a constexpr.
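
Tying the documentation above together, a self-contained sketch; the function name and its contents are illustrative, not part of this diff:

#include <map>
#include <string>

#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"

// Sketch only: a lazily built, never-destroyed lookup table.
const std::map<int, std::string>& GetErrorNames() {
  // Built on first call (thread-safe per C++11 magic statics); the
  // destructor never runs, so no exit-time destructor is registered.
  static const partition_alloc::internal::base::NoDestructor<
      std::map<int, std::string>>
      names([] {
        std::map<int, std::string> m;
        m[0] = "ok";
        m[1] = "out_of_memory";
        return m;
      }());
  return *names;
}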

View File

@ -0,0 +1,29 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc::internal::base {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SysInfo {
public:
// Retrieves detailed numeric values for the OS version.
// DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
// for OS version-specific feature checks and workarounds. If you must use
// an OS version check instead of a feature check, use the base::mac::IsOS*
// family from base/mac/mac_util.h, or base::win::GetVersion from
// base/win/windows_version.h.
static void OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version);
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
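
A brief usage sketch against the header above; the caller is illustrative:

#include <stdint.h>

#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"

// Sketch only: query the OS version triple and gate on the major version.
bool IsAtLeastMajorVersion(int32_t want_major) {
  int32_t major = 0, minor = 0, bugfix = 0;
  partition_alloc::internal::base::SysInfo::OperatingSystemVersionNumbers(
      &major, &minor, &bugfix);
  return major >= want_major;
}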

View File

@ -0,0 +1,24 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
#import <Foundation/Foundation.h>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
namespace partition_alloc::internal::base {
// static
void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version) {
NSOperatingSystemVersion version =
[[NSProcessInfo processInfo] operatingSystemVersion];
*major_version = saturated_cast<int32_t>(version.majorVersion);
*minor_version = saturated_cast<int32_t>(version.minorVersion);
*bugfix_version = saturated_cast<int32_t>(version.patchVersion);
}
} // namespace partition_alloc::internal::base

View File

@ -0,0 +1,24 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
#import <Foundation/Foundation.h>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
namespace partition_alloc::internal::base {
// static
void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version) {
NSOperatingSystemVersion version =
[[NSProcessInfo processInfo] operatingSystemVersion];
*major_version = saturated_cast<int32_t>(version.majorVersion);
*minor_version = saturated_cast<int32_t>(version.minorVersion);
*bugfix_version = saturated_cast<int32_t>(version.patchVersion);
}
} // namespace partition_alloc::internal::base

View File

@ -31,18 +31,18 @@ static_assert(sizeof(void*) != 8, "");
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
// Use dynamically sized GigaCage. This allows to query the size at run-time,
// before initialization, instead of using a hardcoded constexpr.
// Allow PA to select an alternate pool size at run-time before initialization,
// rather than using a single constexpr value.
//
// This is needed on iOS because iOS test processes can't handle a large cage
// This is needed on iOS because iOS test processes can't handle large pools
// (see crbug.com/1250788).
//
// This is needed on Windows, because OS versions <8.1 incur commit charge even
// on reserved address space, thus don't handle large cage well (see
// on reserved address space, thus don't handle large pools well (see
// crbug.com/1101421 and crbug.com/1217759).
//
// This setting is specific to 64-bit, as 32-bit has a different implementation.
#define PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE
#define PA_DYNAMICALLY_SELECT_POOL_SIZE
#endif // defined(PA_HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
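
As a rough sketch of what run-time selection buys over a single constexpr value, with purely illustrative sizes and policy:

#include <cstddef>

// Sketch only: with PA_DYNAMICALLY_SELECT_POOL_SIZE the pool size is picked
// once, before initialization, instead of being baked in at compile time.
constexpr size_t kGiB = size_t{1} << 30;

size_t ChoosePoolSize(bool is_memory_constrained) {
  return is_memory_constrained ? 4 * kGiB : 16 * kGiB;
}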
@ -254,10 +254,10 @@ constexpr bool kUseLazyCommit = false;
// Enable shadow metadata.
//
// With this flag, a shadow GigaCage will be mapped, on which writable shadow
// With this flag, shadow pools will be mapped, on which writable shadow
// metadata is placed, and the real metadata is set to read-only instead.
// This feature is only enabled with 64-bits CPUs because GigaCage does not
// exist with 32-bits CPUs.
// This feature is only enabled in 64-bit environments because pools work
// differently with 32-bit pointers (see glossary).
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
defined(PA_HAS_64_BITS_POINTERS)
#define PA_ENABLE_SHADOW_METADATA
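
A sketch of the addressing idea behind shadow metadata, assuming a fixed per-pool displacement; the constant and function are hypothetical:

#include <stdint.h>

// Sketch only: the real metadata page stays mapped read-only, while writes
// land on an identically laid out shadow at a fixed displacement. PA derives
// the displacement per pool at initialization; this value is made up.
constexpr uintptr_t kShadowOffset = uintptr_t{1} << 40;

uintptr_t ShadowOf(uintptr_t metadata_address) {
  return metadata_address + kShadowOffset;
}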

View File

@ -80,7 +80,7 @@ constexpr size_t kPartitionCachelineSize = 64;
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
return 16; // 64 KiB
@ -259,18 +259,16 @@ constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// GigaCage is generally split into two pools, one which supports BackupRefPtr
// (BRP) and one that doesn't.
// PartitionAlloc's address space is split into pools. See `glossary.md`.
#if defined(PA_HAS_64_BITS_POINTERS)
// The 3rd, Configurable Pool is only available in 64-bit mode.
constexpr size_t kNumPools = 3;
// Maximum GigaCage pool size. With exception of Configurable Pool, it is also
// the actual size, unless PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE is set, which
// Maximum pool size. With the exception of the Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// allows choosing a different size at initialization time for certain
// configurations.
//
// Special-case Android and iOS, which incur test failures with larger
// GigaCage. Regardless, allocating >8GiB with malloc() on these platforms is
// pools. Regardless, allocating >8GiB with malloc() on these platforms is
// unrealistic as of 2022.
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = 8 * kGiB;
@ -318,7 +316,7 @@ constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
#if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the GigaCage.
// because this is the reservation granularity of the pools.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
return kSuperPageSize;
}
@ -329,7 +327,7 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
#else // defined(PA_HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that GigaCage bitmaps
// unit. However, don't go below partition page size, so that pool bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularity() {
@ -456,11 +454,15 @@ constexpr size_t kInvalidBucketSize = 1;
constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
// Usable size for allocations that require the hack.
constexpr size_t kMac11MallocSizeHackUsableSize =
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) || \
defined(PA_REF_COUNT_STORE_REQUESTED_SIZE) || \
defined(PA_REF_COUNT_CHECK_COOKIE)
40;
#else
44;
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) ||
// defined(PA_REF_COUNT_STORE_REQUESTED_SIZE) ||
// defined(PA_REF_COUNT_CHECK_COOKIE)
#endif // defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK)
} // namespace internal

View File

@ -45,6 +45,17 @@ namespace partition_alloc::internal {
namespace {
#if defined(PA_ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE uintptr_t ShadowMetadataStart(uintptr_t super_page,
pool_handle pool) {
uintptr_t shadow_metadata_start =
super_page + SystemPageSize() + ShadowPoolOffset(pool);
PA_DCHECK(!PartitionAddressSpace::IsInRegularPool(shadow_metadata_start));
PA_DCHECK(!PartitionAddressSpace::IsInBRPPool(shadow_metadata_start));
return shadow_metadata_start;
}
#endif
template <bool thread_safe>
[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure(
PartitionRoot<thread_safe>* root,
@ -86,8 +97,8 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
#endif // !defined(PA_HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// Reserves |requested_size| worth of super pages from the specified pool of the
// GigaCage. If BRP pool is requested this function will honor BRP block list.
// Reserves |requested_size| worth of super pages from the specified pool.
// If the BRP pool is requested, this function will honor the BRP block list.
//
// The returned address will be aligned to kSuperPageSize, and so
// |requested_address| should be. |requested_size| doesn't have to be, however.
@ -103,9 +114,9 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
// AreAllowedSuperPagesForBRPPool.
// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
// designed to not need locking.
uintptr_t ReserveMemoryFromGigaCage(pool_handle pool,
uintptr_t requested_address,
size_t requested_size) {
uintptr_t ReserveMemoryFromPool(pool_handle pool,
uintptr_t requested_address,
size_t requested_size) {
PA_DCHECK(!(requested_address % kSuperPageSize));
uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve(
@ -115,7 +126,7 @@ uintptr_t ReserveMemoryFromGigaCage(pool_handle pool,
// allocation honors the block list. Find a better address otherwise.
#if !defined(PA_HAS_64_BITS_POINTERS) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == GetBRPPool()) {
if (pool == kBRPPoolHandle) {
constexpr int kMaxRandomAddressTries = 10;
for (int i = 0; i < kMaxRandomAddressTries; ++i) {
if (!reserved_address ||
@ -224,16 +235,16 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
PartitionPage<thread_safe>* page = nullptr;
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
const PartitionTag tag = root->GetNewPartitionTag();
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
{
// Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several
// system calls. With GigaCage, no mmap() (or equivalent) call is made on 64
// bit systems, but page permissions are changed with mprotect(), which is a
// syscall.
// system calls. Although no mmap() (or equivalent) calls are made on
// 64 bit systems, page permissions are changed with mprotect(), which is
// a syscall.
//
// These calls are almost always slow (at least a couple us per syscall on a
// desktop Linux machine), and they also have a very long latency tail,
@ -266,17 +277,15 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PA_DCHECK(slot_size <= available_reservation_size);
#endif
// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool();
uintptr_t reservation_start;
{
// Reserving memory from the GigaCage is actually not a syscall on 64 bit
// Reserving memory from the pool is actually not a syscall on 64 bit
// platforms.
#if !defined(PA_HAS_64_BITS_POINTERS)
ScopedSyscallTimer timer{root};
#endif
reservation_start = ReserveMemoryFromGigaCage(pool, 0, reservation_size);
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
}
if (PA_UNLIKELY(!reservation_start)) {
if (return_null)
@ -294,23 +303,41 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(
reservation_start + SystemPageSize(),
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
// used, allocate 2 SystemPages, one for SuperPage metadata and the
// other for RefCount "bitmap" (only one of its elements will be
// used).
(pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
RecommitSystemPages(reservation_start + SystemPageSize(),
SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
PageAccessibilityConfiguration::kRead,
#else
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
PageAccessibilityDisposition::kRequireUpdate);
}
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
// used, allocate a SystemPage for RefCount "bitmap" (only one of its
// elements will be used).
if (pool == kBRPPoolHandle) {
ScopedSyscallTimer timer{root};
RecommitSystemPages(reservation_start + SystemPageSize() * 2,
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(reservation_start, pool),
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
// No need to hold root->lock_. Now that memory is reserved, no other
// overlapping region can be allocated (because of how GigaCage works),
// overlapping region can be allocated (because of how pools work),
// so no other thread can update the same offset table entries at the
// same time. Furthermore, nobody will be reading these offsets until this
// function returns.
@ -382,7 +409,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
SlotSpanMetadata<thread_safe>(&metadata->bucket);
// It is typically possible to map a large range of inaccessible pages, and
// this is leveraged in multiple places, including the GigaCage. However,
// this is leveraged in multiple places, including the pools. However,
// this doesn't mean that we can commit all this memory. For the vast
// majority of allocations, this just means that we crash in a slightly
// different place, but for callers ready to handle failures, we have to
@ -422,9 +449,9 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &metadata->bucket;
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
DirectMapPartitionTagSetValue(slot_start, tag);
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
}
root->lock_.AssertAcquired();
@ -574,7 +601,7 @@ void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
slot_size = new_slot_size;
slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const();
empty_slot_spans_head = nullptr;
decommitted_slot_spans_head = nullptr;
num_full_slot_spans = 0;
@ -702,10 +729,8 @@ uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPageSpan(
// page table bloat and not fragmenting address spaces in 32 bit
// architectures.
uintptr_t requested_address = root->next_super_page;
// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool();
uintptr_t super_page_span_start = ReserveMemoryFromGigaCage(
uintptr_t super_page_span_start = ReserveMemoryFromPool(
pool, requested_address, super_page_count * kSuperPageSize);
if (PA_UNLIKELY(!super_page_span_start)) {
if (flags & AllocFlags::kReturnNull)
@ -780,20 +805,35 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
// also a tiny amount of extent metadata.
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(super_page + SystemPageSize(),
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the
// BRP pool is used, allocate 2 SystemPages, one for
// SuperPage metadata and the other for RefCount bitmap.
(root->ChoosePool() == GetBRPPool())
? SystemPageSize() * 2
: SystemPageSize(),
RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
PageAccessibilityConfiguration::kRead,
#else
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityDisposition::kRequireUpdate);
}
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
// used, allocate a SystemPage for RefCount bitmap.
if (root->ChoosePool() == kBRPPoolHandle) {
ScopedSyscallTimer timer{root};
RecommitSystemPages(super_page + SystemPageSize() * 2, SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(super_page, root->ChoosePool()),
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
// If we were after a specific address, but didn't get it, assume that
// the system chose a lousy address. Here most OS'es have a default
@ -962,10 +1002,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(return_slot, slot_size,
root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr;
@ -982,10 +1022,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(next_slot, slot_size,
root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry);
@ -1104,8 +1144,7 @@ bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
++num_full_slot_spans;
// Overflow. Most likely a correctness issue in the code. It is in theory
// possible that the number of full slot spans really reaches (1 << 24),
// but this is very unlikely (and not possible with most GigaCage
// settings).
// but this is very unlikely (and not possible with most pool settings).
PA_CHECK(num_full_slot_spans);
// Not necessary but might help stop accidents.
slot_span->next_slot_span = nullptr;
@ -1131,7 +1170,7 @@ bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
} else {
// Active list is now empty.
active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const();
}
return usable_active_list_head;
@ -1179,7 +1218,7 @@ void PartitionBucket<thread_safe>::MaintainActiveList() {
if (!new_active_slot_spans_head) {
new_active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const();
}
active_slot_spans_head = new_active_slot_spans_head;
}
@ -1278,7 +1317,13 @@ void PartitionBucket<thread_safe>::SortActiveSlotSpans() {
// Reverse order, since we insert at the head of the list.
for (int i = index - 1; i >= 0; i--) {
active_spans_array[i]->next_slot_span = active_slot_spans_head;
if (active_spans_array[i] ==
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
// The sentinel is const, don't try to write to it.
PA_DCHECK(active_slot_spans_head == nullptr);
} else {
active_spans_array[i]->next_slot_span = active_slot_spans_head;
}
active_slot_spans_head = active_spans_array[i];
}
}

View File

@ -16,7 +16,7 @@ static constexpr size_t kCookieSize = 16;
// Cookie is enabled for debug builds.
#if BUILDFLAG(PA_DCHECK_IS_ON)
static constexpr unsigned char kCookieValue[kCookieSize] = {
inline constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};

View File

@ -73,7 +73,7 @@ PA_ALWAYS_INLINE void PartitionDirectUnmap(
// This can create a fake "address space exhaustion" OOM, in the case where
// e.g. a large allocation is freed on a thread, and another large one is made
// from another *before* UnmapNow() has finished running. In this case the
// second one may not find enough space in the GigaCage, and fail. This is
// second one may not find enough space in the pool, and fail. This is
// expected to be very rare though, and likely preferable to holding the lock
// while releasing the address space.
ScopedUnlockGuard unlock{root->lock_};
@ -137,16 +137,23 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
}
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>
const SlotSpanMetadata<thread_safe>
SlotSpanMetadata<thread_safe>::sentinel_slot_span_;
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
const SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
return &sentinel_slot_span_;
}
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const() {
return const_cast<SlotSpanMetadata<thread_safe>*>(&sentinel_slot_span_);
}
template <bool thread_safe>
SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
PartitionBucket<thread_safe>* bucket)
@ -312,7 +319,7 @@ void UnmapNow(uintptr_t reservation_start,
#if BUILDFLAG(PA_DCHECK_IS_ON)
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == GetBRPPool()) {
if (pool == kBRPPoolHandle) {
// In 32-bit mode, the beginning of a reservation may be excluded from the
// BRP pool, so shift the pointer. Other pools don't have this logic.
PA_DCHECK(IsManagedByPartitionAllocBRPPool(
@ -327,8 +334,8 @@ void UnmapNow(uintptr_t reservation_start,
} else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{
PA_DCHECK(pool == GetRegularPool() ||
(IsConfigurablePoolAvailable() && pool == GetConfigurablePool()));
PA_DCHECK(pool == kRegularPoolHandle || (IsConfigurablePoolAvailable() &&
pool == kConfigurablePoolHandle));
// Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
IsManagedByPartitionAllocConfigurablePool(reservation_start));

View File

@ -296,7 +296,13 @@ struct SlotSpanMetadata {
// TODO(ajwong): Can this be made private? https://crbug.com/787153
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
static SlotSpanMetadata* get_sentinel_slot_span();
static const SlotSpanMetadata* get_sentinel_slot_span();
// The sentinel is not supposed to be modified and hence we mark it as const
// under the hood. However, we often store it together with mutable metadata
// objects and need a non-const pointer.
// You can use this function in that case, but you must ensure that the
// returned object is never written to.
static SlotSpanMetadata* get_sentinel_slot_span_non_const();
// Slot span state getters.
PA_ALWAYS_INLINE bool is_active() const;
@ -316,7 +322,7 @@ struct SlotSpanMetadata {
//
// Note, this declaration is kept in the header as opposed to an anonymous
// namespace so the getter can be fully inlined.
static SlotSpanMetadata sentinel_slot_span_;
static const SlotSpanMetadata sentinel_slot_span_;
// For the sentinel.
constexpr SlotSpanMetadata() noexcept
: marked_full(0),

View File

@ -8,6 +8,7 @@
#include <atomic>
#include <cstdint>
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -20,10 +21,6 @@
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#endif
namespace partition_alloc::internal {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@ -219,6 +216,18 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
return alive;
}
// Called when a raw_ptr is not banning dangling ptrs, but the user still
// wants to ensure the pointer is not currently dangling. This is currently
// used in UnretainedWrapper to make sure callbacks are not invoked with
// dangling pointers. If such a raw_ptr exists but the allocation is no longer
// alive, then we have a dangling pointer to a dead object.
PA_ALWAYS_INLINE void ReportIfDangling() {
if (!IsAlive()) {
partition_alloc::internal::UnretainedDanglingRawPtrDetected(
reinterpret_cast<uintptr_t>(this));
}
}
// GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
// make sure the `raw_ptr<T>` release operation will never attempt to call the
// PA `free` on such a slot. GWP-ASan takes the extra reference into account

View File

@ -683,7 +683,7 @@ template <bool thread_safe>
#endif // #if !defined(ARCH_CPU_64_BITS)
// Out of memory can be due to multiple causes, such as:
// - Out of GigaCage virtual address space
// - Out of virtual address space in the desired pool
// - Out of commit due to either our process, or another one
// - Excessive allocations in the current process
//
@ -831,7 +831,8 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// We mark the sentinel slot span as free to make sure it is skipped by our
// logic to find a new active slot span.
memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
sentinel_bucket.active_slot_spans_head =
SlotSpan::get_sentinel_slot_span_non_const();
// This is a "magic" value so we can test if a root pointer is valid.
inverted_self = ~reinterpret_cast<uintptr_t>(this);

View File

@ -389,10 +389,10 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
~PartitionRoot();
// This will unreserve any space in the GigaCage that the PartitionRoot is
// This will unreserve any space in the pool that the PartitionRoot is
// using. This is needed because many tests create and destroy many
// PartitionRoots over the lifetime of a process, which can exhaust the
// GigaCage and cause tests to fail.
// pool and cause tests to fail.
void DestructForTesting();
#if defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK)
@ -632,12 +632,14 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
internal::pool_handle ChoosePool() const {
if (flags.use_configurable_pool) {
return internal::GetConfigurablePool();
PA_DCHECK(IsConfigurablePoolAvailable());
return internal::kConfigurablePoolHandle;
}
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return brp_enabled() ? internal::GetBRPPool() : internal::GetRegularPool();
return brp_enabled() ? internal::kBRPPoolHandle
: internal::kRegularPoolHandle;
#else
return internal::GetRegularPool();
return internal::kRegularPoolHandle;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
}
@ -1171,8 +1173,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
// Returns whether MTE is supported for this partition root. Because MTE stores
// tagging information in the high bits of the pointer, it causes issues with
// components like V8's ArrayBuffers which use custom pointer representations.
// All custom representations encountered so far rely on a caged memory address
// area / configurable pool, so we use that as a proxy.
// All custom representations encountered so far rely on an "is in configurable
// pool?" check, so we use that as a proxy.
template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
const {
@ -1195,7 +1197,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
uintptr_t object_addr = internal::ObjectPtr2Addr(object);
// On Android, malloc() interception is more fragile than on other
// platforms, as we use wrapped symbols. However, the GigaCage allows us to
// platforms, as we use wrapped symbols. However, the pools allow us to
// quickly tell that a pointer was allocated with PartitionAlloc.
//
// This is a crash to detect imperfect symbol interception. However, we can

View File

@ -60,10 +60,10 @@ static constexpr uint16_t kOffsetTagNormalBuckets =
//
// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
// until X is unreserved, because PartitionAlloc always uses kSuperPageSize
// alignment when reserving address spaces. One can use "GigaCage" to
// further determine which part of the supe page is used by PartitionAlloc.
// This isn't a problem in 64-bit mode, where allocation granularity is
// kSuperPageSize.
// alignment when reserving address spaces. One can use an "is in pool?" check
// to further determine which part of the super page is used by
// PartitionAlloc. This isn't a problem in 64-bit mode, where allocation
// granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
public:
#if defined(PA_HAS_64_BITS_POINTERS)
@ -81,7 +81,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets.");
static struct _ReservationOffsetTable {
static PA_CONSTINIT struct _ReservationOffsetTable {
// The number of table elements is less than MAX_UINT16, so the element type
// can be uint16_t.
static_assert(

View File

@ -10,18 +10,13 @@ include_rules = [
"+base/allocator/buildflags.h",
"+base/allocator/early_zone_registration_mac.h",
"+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_allocator/partition_alloc_base",
"+base/base_export.h",
"+base/bind.h",
"+base/compiler_specific.h",
"+base/feature_list.h",
"+base/ios/ios_util.h",
"+base/logging.h",
"+base/mac/mac_util.h",
"+base/mac/mach_logging.h",
"+base/memory/nonscannable_memory.h",
"+base/memory/page_size.h",
"+base/numerics/checked_math.h",
"+base/numerics/safe_conversions.h",
"+base/process/memory.h",
"+base/synchronization/lock.h",
"+base/threading/platform_thread.h",
@ -35,7 +30,6 @@ include_rules = [
specific_include_rules = {
"allocator_shim_unittest\.cc$": [
"+base/mac/mac_util.h",
"+base/synchronization/waitable_event.h",
"+base/threading/thread_local.h",
],

View File

@ -40,9 +40,9 @@
#include "third_party/apple_apsl/CFBase.h"
#if BUILDFLAG(IS_IOS)
#include "base/ios/ios_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
#else
#include "base/mac/mac_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
#endif
namespace allocator_shim {
@ -237,9 +237,9 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
return !base::ios::IsRunningOnOrLater(17, 0, 0);
return !partition_alloc::internal::base::ios::IsRunningOnOrLater(17, 0, 0);
#else
return !base::mac::IsOSLaterThan13_DontCallThis();
return !partition_alloc::internal::base::mac::IsOSLaterThan13_DontCallThis();
#endif
}

View File

@ -4,9 +4,9 @@
#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/shim/allocator_shim.h"
#include "base/compiler_specific.h"
#include "base/numerics/checked_math.h"
#include "base/process/memory.h"
#include <dlfcn.h>
@ -36,7 +36,7 @@ constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
// Cannot force glibc's malloc() to crash when a large size is requested, so
// do it in the shim instead.
if (UNLIKELY(size >= kMaxAllowedSize))
if (PA_UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_malloc(size);
@ -45,7 +45,7 @@ void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
void* GlibcUncheckedMalloc(const AllocatorDispatch*,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
if (PA_UNLIKELY(size >= kMaxAllowedSize))
return nullptr;
return __libc_malloc(size);
@ -55,8 +55,8 @@ void* GlibcCalloc(const AllocatorDispatch*,
size_t n,
size_t size,
void* context) {
const auto total = base::CheckMul(n, size);
if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
const auto total = partition_alloc::internal::base::CheckMul(n, size);
if (PA_UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size * n);
return __libc_calloc(n, size);
@ -66,7 +66,7 @@ void* GlibcRealloc(const AllocatorDispatch*,
void* address,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
if (PA_UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_realloc(address, size);
@ -76,7 +76,7 @@ void* GlibcMemalign(const AllocatorDispatch*,
size_t alignment,
size_t size,
void* context) {
if (UNLIKELY(size >= kMaxAllowedSize))
if (PA_UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size);
return __libc_memalign(alignment, size);
@ -86,7 +86,7 @@ void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
__libc_free(address);
}
NO_SANITIZE("cfi-icall")
PA_NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {

View File

@ -16,7 +16,10 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
@ -24,11 +27,7 @@
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
@ -78,9 +77,9 @@ class LeakySingleton {
public:
constexpr LeakySingleton() = default;
ALWAYS_INLINE T* Get() {
PA_ALWAYS_INLINE T* Get() {
auto* instance = instance_.load(std::memory_order_acquire);
if (LIKELY(instance))
if (PA_LIKELY(instance))
return instance;
return GetSlowPath();
@ -177,7 +176,7 @@ class MainPartitionConstructor {
LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
MainPartitionConstructor>
g_root CONSTINIT = {};
g_root PA_CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* Allocator() {
return g_root.Get();
}
@ -194,7 +193,7 @@ class AlignedPartitionConstructor {
LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
AlignedPartitionConstructor>
g_aligned_root CONSTINIT = {};
g_aligned_root PA_CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
return g_original_root.load(std::memory_order_relaxed);
@ -232,7 +231,7 @@ size_t g_extra_bytes;
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
PA_ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
@ -320,7 +319,9 @@ void* PartitionCalloc(const AllocatorDispatch*,
size_t size,
void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
const size_t total =
partition_alloc::internal::base::CheckMul(n, MaybeAdjustSize(size))
.ValueOrDie();
return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
partition_alloc::PartitionPageSize());
@ -386,9 +387,9 @@ void* PartitionRealloc(const AllocatorDispatch*,
void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address)) {
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) &&
address)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `realloc` which supports zone-
// dispatching so that it appropriately selects the right zone.
@ -411,9 +412,9 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE)
// TODO(bartekn): Add MTE unmasking here (and below).
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free` which supports zone-
// dispatching so that it appropriately selects the right zone.
@ -426,9 +427,9 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
// the pointer, pass it along. This should not have a runtime cost vs regular
// Android, since on Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(IS_CAST_ANDROID)
if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) &&
object)) {
// A memory region allocated by the system allocator is passed in this
// function. Forward the request to `free()`, which is `__real_free()`
// here.
@ -788,23 +789,26 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
info.arena = 0; // Memory *not* allocated with mmap().
// Memory allocated with mmap(), aka virtual size.
info.hblks = base::checked_cast<decltype(info.hblks)>(
allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
info.hblks =
partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
// Resident bytes.
info.hblkhd = base::checked_cast<decltype(info.hblkhd)>(
allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes);
info.hblkhd =
partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes);
// Allocated bytes.
info.uordblks = base::checked_cast<decltype(info.uordblks)>(
allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes);
info.uordblks =
partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes);
return info;
}

View File

@ -15,8 +15,8 @@
#include <new>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if !BUILDFLAG(IS_APPLE)
@ -28,7 +28,7 @@
// it is also needless, since no library used on macOS imports these.
//
// TODO(lizeb): It may not be necessary anywhere to export these.
#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
#define SHIM_CPP_SYMBOLS_EXPORT PA_NOINLINE
#endif
SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {

View File

@ -16,8 +16,8 @@
#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/numerics/safe_conversions.h"
namespace allocator_shim {
@ -122,8 +122,9 @@ void* AlignAllocation(void* ptr, size_t alignment) {
// Write the prefix.
AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
prefix->original_allocation_offset = base::checked_cast<unsigned int>(
address - reinterpret_cast<uintptr_t>(ptr));
prefix->original_allocation_offset =
partition_alloc::internal::base::checked_cast<unsigned int>(
address - reinterpret_cast<uintptr_t>(ptr));
#if BUILDFLAG(PA_DCHECK_IS_ON)
prefix->magic = AlignedPrefix::kMagic;
#endif // BUILDFLAG(PA_DCHECK_IS_ON)

View File

@ -765,10 +765,10 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
private:
#if defined(PA_HAS_64_BITS_POINTERS)
PA_ALWAYS_INLINE static uintptr_t CageBase() {
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
return PartitionAddressSpace::RegularPoolBase();
}
PA_ALWAYS_INLINE static uintptr_t CageMask() {
PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
return PartitionAddressSpace::RegularPoolBaseMask();
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
@ -1274,7 +1274,7 @@ PCScanInternal::~PCScanInternal() = default;
void PCScanInternal::Initialize(PCScan::InitConfig config) {
PA_DCHECK(!is_initialized_);
#if defined(PA_HAS_64_BITS_POINTERS)
// Make sure that GigaCage is initialized.
// Make sure that pools are initialized.
PartitionAddressSpace::Init();
#endif
CommitCardTable();

View File

@ -38,7 +38,7 @@
namespace partition_alloc::internal {
// Iterates over range of memory using the best available SIMD extension.
// Assumes that 64bit platforms have cage support and the begin pointer of
// Assumes that 64bit platforms have pool support and that the begin pointers
// of incoming ranges are properly aligned. The class is designed around the CRTP
// version of the "template method" (in GoF terms). CRTP is needed for fast
// static dispatch.
@ -75,7 +75,8 @@ class ScanLoop {
template <typename Derived>
void ScanLoop<Derived>::Run(uintptr_t begin, uintptr_t end) {
// We allow vectorization only for 64bit, since it requires support of the
// 64bit cage, and only for x86 because a special instruction set is required.
// 64bit regular pool, and only for x86 because a special instruction set is
// required.
#if defined(ARCH_CPU_X86_64)
if (simd_type_ == SimdSupport::kAVX2)
return RunAVX2(begin, end);
@ -95,8 +96,8 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
#if defined(PA_HAS_64_BITS_POINTERS)
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::CageMask() & kPtrUntagMask;
const uintptr_t base = Derived::CageBase();
const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
const uintptr_t base = Derived::RegularPoolBase();
#endif
for (; begin < end; begin += sizeof(uintptr_t)) {
// Read the region word-by-word. Everything that we read is a potential
@ -128,24 +129,24 @@ __attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
// example, according to the Intel docs, on Broadwell and Haswell the CPI of
// vmovdqa (_mm256_load_si256) is half (0.25) that of vmovapd
// (_mm256_load_pd).
const __m256i vbase = _mm256_set1_epi64x(derived().CageBase());
const __m256i vbase = _mm256_set1_epi64x(derived().RegularPoolBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m256i cage_mask =
_mm256_set1_epi64x(derived().CageMask() & kPtrUntagMask);
// MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
const __m256i regular_pool_mask =
_mm256_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
static_assert(sizeof(__m256i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
// Keep it MTE-untagged. See DisableMTEScope for details.
const __m256i maybe_ptrs =
_mm256_load_si256(reinterpret_cast<__m256i*>(begin));
const __m256i vand = _mm256_and_si256(maybe_ptrs, cage_mask);
const __m256i vand = _mm256_and_si256(maybe_ptrs, regular_pool_mask);
const __m256i vcmp = _mm256_cmpeq_epi64(vand, vbase);
const int mask = _mm256_movemask_pd(_mm256_castsi256_pd(vcmp));
if (PA_LIKELY(!mask))
continue;
// It's important to extract pointers from the already loaded vector.
// Otherwise, new loads can break in-cage assumption checked above.
// Otherwise, new loads can break in-pool assumption checked above.
if (mask & 0b0001)
derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 0));
if (mask & 0b0010)
@ -167,24 +168,24 @@ __attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
static constexpr size_t kWordsInVector = 2;
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const __m128i vbase = _mm_set1_epi64x(derived().CageBase());
const __m128i vbase = _mm_set1_epi64x(derived().RegularPoolBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m128i cage_mask =
_mm_set1_epi64x(derived().CageMask() & kPtrUntagMask);
// MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
const __m128i regular_pool_mask =
_mm_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
static_assert(sizeof(__m128i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
// Keep it MTE-untagged. See DisableMTEScope for details.
const __m128i maybe_ptrs =
_mm_loadu_si128(reinterpret_cast<__m128i*>(begin));
const __m128i vand = _mm_and_si128(maybe_ptrs, cage_mask);
const __m128i vand = _mm_and_si128(maybe_ptrs, regular_pool_mask);
const __m128i vcmp = _mm_cmpeq_epi64(vand, vbase);
const int mask = _mm_movemask_pd(_mm_castsi128_pd(vcmp));
if (PA_LIKELY(!mask))
continue;
// It's important to extract pointers from the already loaded vector.
// Otherwise, new loads can break in-cage assumption checked above.
// Otherwise, new loads can break in-pool assumption checked above.
if (mask & 0b01) {
derived().CheckPointer(_mm_cvtsi128_si64(maybe_ptrs));
}
@ -208,22 +209,22 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
static constexpr size_t kWordsInVector = 2;
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const uint64x2_t vbase = vdupq_n_u64(derived().CageBase());
const uint64x2_t vbase = vdupq_n_u64(derived().RegularPoolBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const uint64x2_t cage_mask =
vdupq_n_u64(derived().CageMask() & kPtrUntagMask);
// MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
const uint64x2_t regular_pool_mask =
vdupq_n_u64(derived().RegularPoolMask() & kPtrUntagMask);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
// Keep it MTE-untagged. See DisableMTEScope for details.
const uint64x2_t maybe_ptrs = vld1q_u64(reinterpret_cast<uint64_t*>(begin));
const uint64x2_t vand = vandq_u64(maybe_ptrs, cage_mask);
const uint64x2_t vand = vandq_u64(maybe_ptrs, regular_pool_mask);
const uint64x2_t vcmp = vceqq_u64(vand, vbase);
const uint32_t max = vmaxvq_u32(vreinterpretq_u32_u64(vcmp));
if (PA_LIKELY(!max))
continue;
// It's important to extract pointers from the already loaded vector.
// Otherwise, new loads can break in-cage assumption checked above.
// Otherwise, new loads can break in-pool assumption checked above.
if (vgetq_lane_u64(vcmp, 0))
derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 0));
if (vgetq_lane_u64(vcmp, 1))

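All three SIMD loops above (AVX2, SSE4.1, NEON) vectorize the same predicate. A scalar sketch of the check they implement, using the renamed accessors (illustrative, not part of the diff):

#include <cstdint>

// A word is a candidate PartitionAlloc pointer iff masking it with the
// regular-pool mask (combined with kPtrUntagMask in the loops above, so MTE
// tag bits are stripped too) yields exactly the regular-pool base.
bool MaybeInRegularPool(uintptr_t word,
                        uintptr_t regular_pool_base,
                        uintptr_t regular_pool_mask) {
  return (word & regular_pool_mask) == regular_pool_base;
}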
View File

@ -77,7 +77,7 @@ UserFaultFDWriteProtector::UserFaultFDWriteProtector()
PA_CHECK(-1 != ioctl(uffd_, UFFDIO_API, &uffdio_api));
PA_CHECK(UFFD_API == uffdio_api.api);
// Register the giga-cage to listen uffd events.
// Register the regular pool to listen uffd events.
struct uffdio_register uffdio_register;
uffdio_register.range.start = PartitionAddressSpace::RegularPoolBase();
uffdio_register.range.len = kPoolMaxSize;

View File

@ -12,20 +12,23 @@ namespace base::android::features {
// When the browser process has been in the background for several minutes at a
// time, trigger an artificial critical memory pressure notification. This is
// intended to reduce memory footprint.
const base::Feature kBrowserProcessMemoryPurge{"BrowserProcessMemoryPurge",
FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kBrowserProcessMemoryPurge,
"BrowserProcessMemoryPurge",
FEATURE_DISABLED_BY_DEFAULT);
// Crash the browser process if a child process is created which does not match
// the browser process and the browser package appears to have changed since the
// browser process was launched, so that the browser process will be started
// fresh when next used, hopefully resolving the issue.
const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged{
"CrashBrowserOnChildMismatchIfBrowserChanged", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged,
"CrashBrowserOnChildMismatchIfBrowserChanged",
FEATURE_DISABLED_BY_DEFAULT);
// Crash the browser process if a child process is created which does not match
// the browser process regardless of whether the browser package appears to have
// changed.
const base::Feature kCrashBrowserOnAnyChildMismatch{
"CrashBrowserOnAnyChildMismatch", FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kCrashBrowserOnAnyChildMismatch,
"CrashBrowserOnAnyChildMismatch",
FEATURE_DISABLED_BY_DEFAULT);
} // namespace base::android::features

View File

@ -13,9 +13,9 @@ namespace base::android::features {
// alongside the definition of their values in the .cc file.
// Alphabetical:
extern const base::Feature kBrowserProcessMemoryPurge;
extern const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged;
extern const base::Feature kCrashBrowserOnAnyChildMismatch;
BASE_DECLARE_FEATURE(kBrowserProcessMemoryPurge);
BASE_DECLARE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged);
BASE_DECLARE_FEATURE(kCrashBrowserOnAnyChildMismatch);
} // namespace base::android::features
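Taken together, the two hunks above show the BASE_FEATURE migration pattern: the macro pair replaces hand-written `const base::Feature` constants and their `extern` declarations. A minimal sketch of the pattern (feature name invented for illustration):

// In the header:
#include "base/feature_list.h"
BASE_DECLARE_FEATURE(kMyExperiment);

// In the .cc file:
BASE_FEATURE(kMyExperiment, "MyExperiment", base::FEATURE_DISABLED_BY_DEFAULT);

// Query sites are unchanged by the migration:
if (base::FeatureList::IsEnabled(kMyExperiment)) {
  // ...
}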

View File

@ -35,6 +35,8 @@ enum SdkVersion {
SDK_VERSION_Q = 29,
SDK_VERSION_R = 30,
SDK_VERSION_S = 31,
SDK_VERSION_Sv2 = 32,
SDK_VERSION_T = 33,
};
// BuildInfo is a singleton class that stores android build and device

View File

@ -35,6 +35,9 @@ class TrialLogger : public base::FieldTrialList::Observer {
static void Log(const std::string& trial_name,
const std::string& group_name) {
// Changes to the format of the log message below must be accompanied by
// changes to finch smoke tests since they look for this log message
// in the logcat.
LOG(INFO) << "Active field trial \"" << trial_name
<< "\" in group \"" << group_name<< '"';
}

View File

@ -160,8 +160,9 @@ JavaHandlerThread::State::State()
sequence_manager::SequenceManager::Settings::Builder()
.SetMessagePumpType(base::MessagePumpType::JAVA)
.Build())),
default_task_queue(sequence_manager->CreateTaskQueue(
sequence_manager::TaskQueue::Spec("default_tq"))) {
default_task_queue(
sequence_manager->CreateTaskQueue(sequence_manager::TaskQueue::Spec(
sequence_manager::QueueName::DEFAULT_TQ))) {
// TYPE_JAVA to get the Android java style message loop.
std::unique_ptr<MessagePump> message_pump =
MessagePump::Create(base::MessagePumpType::JAVA);

View File

@ -253,7 +253,7 @@ If a Java object "owns" a native one, store the pointer via
the object. For example, have a `close()` that deletes the native object.
The best way to pass "compound" types across in either direction is to
create an inner class with PODs and a factory function. If possible, make mark
create an inner class with PODs and a factory function. If possible, mark
all the fields as "final".
## Build Rules

View File

@ -1553,18 +1553,18 @@ def GetScriptName():
return os.sep.join(script_components[base_index:])
def _RemoveStaleHeaders(path, output_files):
def _RemoveStaleHeaders(path, output_names):
if not os.path.isdir(path):
return
# Do not remove output files so that timestamps on declared outputs are not
# modified unless their contents are changed (avoids reverse deps needing to
# be rebuilt).
preserve = set(output_files)
preserve = set(output_names)
for root, _, files in os.walk(path):
for f in files:
file_path = os.path.join(root, f)
if file_path not in preserve:
if os.path.isfile(file_path) and os.path.splitext(file_path)[1] == '.h':
if f not in preserve:
file_path = os.path.join(root, f)
if os.path.isfile(file_path) and file_path.endswith('.h'):
os.remove(file_path)
@ -1591,18 +1591,21 @@ See SampleForTests.java for more details.
help='Uses as a namespace in the generated header '
'instead of the javap class name, or when there is '
'no JNINamespace annotation in the java source.')
parser.add_argument(
'--input_file',
action='append',
required=True,
dest='input_files',
help='Input file names, or paths within a .jar if '
'--jar-file is used.')
parser.add_argument(
'--output_file',
action='append',
dest='output_files',
help='Output file names.')
parser.add_argument('--input_file',
action='append',
required=True,
dest='input_files',
help='Input filenames, or paths within a .jar if '
'--jar-file is used.')
parser.add_argument('--output_dir', required=True, help='Output directory.')
# TODO(agrieve): --prev_output_dir used only to make incremental builds work.
# Remove --prev_output_dir at some point after 2022.
parser.add_argument('--prev_output_dir',
help='Delete headers found in this directory.')
parser.add_argument('--output_name',
action='append',
dest='output_names',
help='Output filenames within output directory.')
parser.add_argument(
'--script_name',
default=GetScriptName(),
@ -1651,22 +1654,28 @@ See SampleForTests.java for more details.
parser.add_argument(
'--split_name',
help='Split name that the Java classes should be loaded from.')
# TODO(agrieve): --stamp used only to make incremental builds work.
# Remove --stamp at some point after 2022.
parser.add_argument('--stamp',
help='Process --prev_output_dir and touch this file.')
args = parser.parse_args()
input_files = args.input_files
output_files = args.output_files
if output_files:
output_dirs = set(os.path.dirname(f) for f in output_files)
if len(output_dirs) != 1:
parser.error(
'jni_generator only supports a single output directory per target '
'(got {})'.format(output_dirs))
output_dir = output_dirs.pop()
output_names = args.output_names
if args.prev_output_dir:
_RemoveStaleHeaders(args.prev_output_dir, [])
if args.stamp:
build_utils.Touch(args.stamp)
sys.exit(0)
if output_names:
# Remove existing headers so that moving .java source files but not updating
# the corresponding C++ include will be a compile failure (otherwise
# incremental builds will usually not catch this).
_RemoveStaleHeaders(output_dir, output_files)
_RemoveStaleHeaders(args.output_dir, output_names)
else:
output_files = [None] * len(input_files)
output_names = [None] * len(input_files)
temp_dir = tempfile.mkdtemp()
try:
if args.jar_file:
@ -1674,7 +1683,11 @@ See SampleForTests.java for more details.
z.extractall(temp_dir, input_files)
input_files = [os.path.join(temp_dir, f) for f in input_files]
for java_path, header_path in zip(input_files, output_files):
for java_path, header_name in zip(input_files, output_names):
if header_name:
header_path = os.path.join(args.output_dir, header_name)
else:
header_path = None
GenerateJNIHeader(java_path, header_path, args)
finally:
shutil.rmtree(temp_dir)

View File

@ -846,7 +846,9 @@ def _MakeProxySignature(proxy_native,
signature_template = string.Template("""
// Hashed name: ${ALT_NAME}""" + native_method_line)
alt_name = proxy_native.hashed_proxy_name
# We add the prefix that is sometimes used so that codesearch can find it if
# someone searches a full method name from the stacktrace.
alt_name = f'Java_J_N_{proxy_native.hashed_proxy_name}'
proxy_name = proxy_native.proxy_name
return signature_template.substitute({

View File

@ -70,17 +70,6 @@
<fields>;
}
# Workaround for crbug/1002847. Methods of BaseGmsClient are incorrectly
# removed even though they are required for the derived class GmsClient
# to correctly implement Api$Client.
# TODO: remove once crbug/1002847 resolved.
-keep public class com.google.android.gms.common.internal.BaseGmsClient {
public void disconnect();
public void dump(java.lang.String,java.io.FileDescriptor,java.io.PrintWriter,java.lang.String[]);
public int getMinApkVersion();
public boolean requiresSignIn();
}
# Remove calls to String.format() where the result goes unused. This can mask
# exceptions if the parameters to String.format() are invalid, but such cases
# are generally programming bugs anyways.

View File

@ -5,9 +5,6 @@
# Contains flags that can be safely shared with Cronet, and thus would be
# appropriate for third-party apps to include.
# Android support library annotations will get converted to androidx ones
# which we want to keep.
-keep @interface androidx.annotation.Keep
-keep @androidx.annotation.Keep class *
-keepclasseswithmembers,allowaccessmodification class * {
@androidx.annotation.Keep <fields>;

View File

@ -11,18 +11,35 @@ namespace base {
namespace android {
namespace {
RadioUtils::OverrideForTesting* g_overrider_for_tests = nullptr;
bool InitializeIsSupported() {
JNIEnv* env = AttachCurrentThread();
return Java_RadioUtils_isSupported(env);
}
} // namespace
RadioUtils::OverrideForTesting::OverrideForTesting() {
DCHECK(!g_overrider_for_tests);
g_overrider_for_tests = this;
}
RadioUtils::OverrideForTesting::~OverrideForTesting() {
DCHECK(g_overrider_for_tests);
g_overrider_for_tests = nullptr;
}
bool RadioUtils::IsSupported() {
static const bool kIsSupported = InitializeIsSupported();
return kIsSupported;
}
RadioConnectionType RadioUtils::GetConnectionType() {
if (g_overrider_for_tests) {
// If GetConnectionType is being used in tests
return g_overrider_for_tests->GetConnectionType();
}
if (!IsSupported())
return RadioConnectionType::kUnknown;

View File

@ -39,6 +39,20 @@ enum class RadioConnectionType {
class BASE_EXPORT RadioUtils {
public:
class OverrideForTesting {
public:
OverrideForTesting();
~OverrideForTesting();
void SetConnectionTypeForTesting(RadioConnectionType connection_type) {
connection_type_ = connection_type;
}
RadioConnectionType GetConnectionType() { return connection_type_; }
private:
RadioConnectionType connection_type_;
};
static bool IsSupported();
static RadioConnectionType GetConnectionType();
static absl::optional<RadioSignalLevel> GetCellSignalLevel();
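The OverrideForTesting helper added above is RAII: it installs itself in the file-local global on construction and removes itself on destruction, so tests can fake the connection type. A hedged usage sketch (the kWifi enumerator and the test scaffolding are assumed, not taken from this diff):

TEST(RadioUtilsTest, ConnectionTypeOverride) {
  base::android::RadioUtils::OverrideForTesting radio_override;
  radio_override.SetConnectionTypeForTesting(RadioConnectionType::kWifi);
  EXPECT_EQ(RadioConnectionType::kWifi,
            base::android::RadioUtils::GetConnectionType());
}  // |radio_override| unregisters itself when it goes out of scope.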

View File

@ -237,6 +237,8 @@ static void JNI_TraceEvent_InstantAndroidIPC(JNIEnv* env,
});
}
#if BUILDFLAG(ENABLE_BASE_TRACING)
static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
jint block_reason,
jint allow_reason,
@ -262,6 +264,16 @@ static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
});
}
#else // BUILDFLAG(ENABLE_BASE_TRACING)
// Empty implementations when TraceLog isn't available.
static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
jint block_reason,
jint allow_reason,
jint snapshot_diff) {}
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
static void JNI_TraceEvent_Begin(JNIEnv* env,
const JavaParamRef<jstring>& jname,
const JavaParamRef<jstring>& jarg) {

View File

@ -1,24 +0,0 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_AS_CONST_H_
#define BASE_AS_CONST_H_
#include <type_traits>
namespace base {
// C++14 implementation of C++17's std::as_const():
// https://en.cppreference.com/w/cpp/utility/as_const
template <typename T>
constexpr std::add_const_t<T>& as_const(T& t) noexcept {
return t;
}
template <typename T>
void as_const(const T&& t) = delete;
} // namespace base
#endif // BASE_AS_CONST_H_

View File

@ -6,22 +6,34 @@
#include <stddef.h>
#include "base/check.h"
#include "base/numerics/checked_math.h"
#include "third_party/modp_b64/modp_b64.h"
namespace base {
std::string Base64Encode(span<const uint8_t> input) {
std::string output;
output.resize(modp_b64_encode_len(input.size())); // makes room for null byte
// modp_b64_encode_len() returns at least 1, so output[0] is safe to use.
const size_t output_size = modp_b64_encode(
&(output[0]), reinterpret_cast<const char*>(input.data()), input.size());
output.resize(output_size);
Base64EncodeAppend(input, &output);
return output;
}
void Base64EncodeAppend(span<const uint8_t> input, std::string* output) {
// Ensure `modp_b64_encode_len` will not overflow. Note this length and
// `modp_b64_encode`'s output include a trailing NUL byte.
CHECK_LE(input.size(), MODP_B64_MAX_INPUT_LEN);
size_t encode_len = modp_b64_encode_len(input.size());
size_t prefix_len = output->size();
output->resize(base::CheckAdd(encode_len, prefix_len).ValueOrDie());
const size_t output_size = modp_b64_encode(
output->data() + prefix_len, reinterpret_cast<const char*>(input.data()),
input.size());
// `output_size` does not include the trailing NUL byte, so this removes it.
output->resize(prefix_len + output_size);
}
void Base64Encode(StringPiece input, std::string* output) {
*output = Base64Encode(base::as_bytes(base::make_span(input)));
}

View File

@ -20,6 +20,10 @@ namespace base {
// Encodes the input binary data in base64.
BASE_EXPORT std::string Base64Encode(span<const uint8_t> input);
// Encodes the input binary data in base64 and appends it to the output.
BASE_EXPORT void Base64EncodeAppend(span<const uint8_t> input,
std::string* output);
// Encodes the input string in base64.
BASE_EXPORT void Base64Encode(StringPiece input, std::string* output);
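A short usage sketch of the new appending variant (illustrative): it encodes directly after an existing prefix, avoiding the temporary string that Base64Encode() plus string concatenation would create.

#include <cstdint>
#include <string>
#include "base/base64.h"

std::string MakeDataUrl() {
  std::string data_url = "data:application/octet-stream;base64,";
  const uint8_t payload[] = {0xDE, 0xAD, 0xBE, 0xEF};
  base::Base64EncodeAppend(payload, &data_url);
  return data_url;  // Ends with "3q2+7w==".
}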

View File

@ -1,341 +1,12 @@
// Copyright 2011 The Chromium Authors
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// TODO(https://crbug.com/1364441): Temporary forwarding header.
#ifndef BASE_BIND_H_
#define BASE_BIND_H_
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include "base/bind_internal.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
#include "base/mac/scoped_block.h"
#endif
// -----------------------------------------------------------------------------
// Usage documentation
// -----------------------------------------------------------------------------
//
// Overview:
// base::BindOnce() and base::BindRepeating() are helpers for creating
// base::OnceCallback and base::RepeatingCallback objects respectively.
//
// For a runnable object of n-arity, the base::Bind*() family allows partial
// application of the first m arguments. The remaining n - m arguments must be
// passed when invoking the callback with Run().
//
// // The first argument is bound at callback creation; the remaining
// // two must be passed when calling Run() on the callback object.
// base::OnceCallback<long(int, long)> cb = base::BindOnce(
// [](short x, int y, long z) { return x * y * z; }, 42);
//
// When binding to a method, the receiver object must also be specified at
// callback creation time. When Run() is invoked, the method will be invoked on
// the specified receiver object.
//
// class C : public base::RefCounted<C> { void F(); };
// auto instance = base::MakeRefCounted<C>();
// auto cb = base::BindOnce(&C::F, instance);
// std::move(cb).Run(); // Identical to instance->F()
//
// See //docs/callback.md for the full documentation.
//
// -----------------------------------------------------------------------------
// Implementation notes
// -----------------------------------------------------------------------------
//
// If you're reading the implementation, before proceeding further, you should
// read the top comment of base/bind_internal.h for a definition of common
// terms and concepts.
namespace base {
// Bind as OnceCallback.
template <typename Functor, typename... Args>
inline OnceCallback<internal::MakeUnboundRunType<Functor, Args...>> BindOnce(
Functor&& functor,
Args&&... args) {
static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
(std::is_rvalue_reference<Functor&&>() &&
!std::is_const<std::remove_reference_t<Functor>>()),
"BindOnce requires non-const rvalue for OnceCallback binding."
" I.e.: base::BindOnce(std::move(callback)).");
static_assert(
std::conjunction<
internal::AssertBindArgIsNotBasePassed<std::decay_t<Args>>...>::value,
"Use std::move() instead of base::Passed() with base::BindOnce()");
return internal::BindImpl<OnceCallback>(std::forward<Functor>(functor),
std::forward<Args>(args)...);
}
// Bind as RepeatingCallback.
template <typename Functor, typename... Args>
inline RepeatingCallback<internal::MakeUnboundRunType<Functor, Args...>>
BindRepeating(Functor&& functor, Args&&... args) {
static_assert(
!internal::IsOnceCallback<std::decay_t<Functor>>(),
"BindRepeating cannot bind OnceCallback. Use BindOnce with std::move().");
return internal::BindImpl<RepeatingCallback>(std::forward<Functor>(functor),
std::forward<Args>(args)...);
}
// Overloads to allow nicer compile errors when attempting to pass the address
// an overloaded function to `BindOnce()` or `BindRepeating()`. Otherwise, clang
// provides only the error message "no matching function [...] candidate
// template ignored: couldn't infer template argument 'Functor'", with no
// reference to the fact that `&` is being used on an overloaded function.
//
// These overloads to provide better error messages will never be selected
// unless template type deduction fails because of how overload resolution
// works; per [over.ics.rank/2.2]:
//
// When comparing the basic forms of implicit conversion sequences (as defined
// in [over.best.ics])
// - a standard conversion sequence is a better conversion sequence than a
// user-defined conversion sequence or an ellipsis conversion sequence, and
// - a user-defined conversion sequence is a better conversion sequence than
// an ellipsis conversion sequence.
//
// So these overloads will only be selected as a last resort iff template type
// deduction fails.
//
// These overloads also intentionally do not return `void`, as this prevents
// clang from emitting spurious errors such as "variable has incomplete type
// 'void'" when assigning the result of `BindOnce()`/`BindRepeating()` to a
// variable with type `auto` or `decltype(auto)`.
struct BindFailedCheckPreviousErrors {};
BindFailedCheckPreviousErrors BindOnce(...);
BindFailedCheckPreviousErrors BindRepeating(...);
// Unretained() allows binding a non-refcounted class, and to disable
// refcounting on arguments that are refcounted objects.
//
// EXAMPLE OF Unretained():
//
// class Foo {
// public:
// void func() { cout << "Foo:f" << endl; }
// };
//
// // In some function somewhere.
// Foo foo;
// OnceClosure foo_callback =
// BindOnce(&Foo::func, Unretained(&foo));
// std::move(foo_callback).Run(); // Prints "Foo:f".
//
// Without the Unretained() wrapper on |&foo|, the above call would fail
// to compile because Foo does not support the AddRef() and Release() methods.
template <typename T>
inline internal::UnretainedWrapper<T> Unretained(T* o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(const raw_ptr<T, I>& o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(raw_ptr<T, I>&& o) {
return internal::UnretainedWrapper<T>(std::move(o));
}
template <typename T, typename I>
inline auto Unretained(const raw_ref<T, I>& o) {
return internal::UnretainedRefWrapper(o);
}
template <typename T, typename I>
inline auto Unretained(raw_ref<T, I>&& o) {
return internal::UnretainedRefWrapper(std::move(o));
}
// RetainedRef() accepts a ref counted object and retains a reference to it.
// When the callback is called, the object is passed as a raw pointer.
//
// EXAMPLE OF RetainedRef():
//
// void foo(RefCountedBytes* bytes) {}
//
// scoped_refptr<RefCountedBytes> bytes = ...;
// OnceClosure callback = BindOnce(&foo, base::RetainedRef(bytes));
// std::move(callback).Run();
//
// Without RetainedRef, the scoped_refptr would try to implicitly convert to
// a raw pointer and fail compilation:
//
// OnceClosure callback = BindOnce(&foo, bytes); // ERROR!
template <typename T>
inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
return internal::RetainedRefWrapper<T>(o);
}
template <typename T>
inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
return internal::RetainedRefWrapper<T>(std::move(o));
}
// Owned() transfers ownership of an object to the callback resulting from
// bind; the object will be deleted when the callback is deleted.
//
// EXAMPLE OF Owned():
//
// void foo(int* arg) { cout << *arg << endl }
//
// int* pn = new int(1);
// RepeatingClosure foo_callback = BindRepeating(&foo, Owned(pn));
//
// foo_callback.Run(); // Prints "1"
// foo_callback.Run(); // Prints "1"
// *pn = 2;
// foo_callback.Run(); // Prints "2"
//
// foo_callback.Reset(); // |pn| is deleted. Also will happen when
// // |foo_callback| goes out of scope.
//
// Without Owned(), someone would have to know to delete |pn| when the last
// reference to the callback is deleted.
template <typename T>
inline internal::OwnedWrapper<T> Owned(T* o) {
return internal::OwnedWrapper<T>(o);
}
template <typename T, typename Deleter>
inline internal::OwnedWrapper<T, Deleter> Owned(
std::unique_ptr<T, Deleter>&& ptr) {
return internal::OwnedWrapper<T, Deleter>(std::move(ptr));
}
// OwnedRef() stores an object in the callback resulting from
// bind and passes a reference to the object to the bound function.
//
// EXAMPLE OF OwnedRef():
//
// void foo(int& arg) { cout << ++arg << endl }
//
// int counter = 0;
// RepeatingClosure foo_callback = BindRepeating(&foo, OwnedRef(counter));
//
// foo_callback.Run(); // Prints "1"
// foo_callback.Run(); // Prints "2"
// foo_callback.Run(); // Prints "3"
//
// cout << counter; // Prints "0", OwnedRef creates a copy of counter.
//
// Supports OnceCallbacks as well, useful to pass placeholder arguments:
//
// void bar(int& ignore, const std::string& s) { cout << s << endl }
//
// OnceClosure bar_callback = BindOnce(&bar, OwnedRef(0), "Hello");
//
// std::move(bar_callback).Run(); // Prints "Hello"
//
// Without OwnedRef() it would not be possible to pass a mutable reference to an
// object owned by the callback.
template <typename T>
internal::OwnedRefWrapper<std::decay_t<T>> OwnedRef(T&& t) {
return internal::OwnedRefWrapper<std::decay_t<T>>(std::forward<T>(t));
}
// Passed() is for transferring movable-but-not-copyable types (eg. unique_ptr)
// through a RepeatingCallback. Logically, this signifies a destructive transfer
// of the state of the argument into the target function. Invoking
// RepeatingCallback::Run() twice on a callback that was created with a Passed()
// argument will CHECK() because the first invocation would have already
// transferred ownership to the target function.
//
// Note that Passed() is not necessary with BindOnce(), as std::move() does the
// same thing. Avoid Passed() in favor of std::move() with BindOnce().
//
// EXAMPLE OF Passed():
//
// void TakesOwnership(std::unique_ptr<Foo> arg) { }
// std::unique_ptr<Foo> CreateFoo() { return std::make_unique<Foo>();
// }
//
// auto f = std::make_unique<Foo>();
//
// // |cb| is given ownership of Foo(). |f| is now NULL.
// // You can use std::move(f) in place of &f, but it's more verbose.
// RepeatingClosure cb = BindRepeating(&TakesOwnership, Passed(&f));
//
// // Run was never called so |cb| still owns Foo() and deletes
// // it on Reset().
// cb.Reset();
//
// // |cb| is given a new Foo created by CreateFoo().
// cb = BindRepeating(&TakesOwnership, Passed(CreateFoo()));
//
// // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
// // no longer owns Foo() and, if reset, would not delete Foo().
// cb.Run(); // Foo() is now transferred to |arg| and deleted.
// cb.Run(); // This CHECK()s since Foo() has already been used once.
//
// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and is
// best suited for use with the return value of a function or other temporary
// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
// to avoid having to write Passed(std::move(scoper)).
//
// Both versions of Passed() prevent T from being an lvalue reference. The first
// via use of enable_if, and the second takes a T* which will not bind to T&.
template <typename T,
std::enable_if_t<!std::is_lvalue_reference_v<T>>* = nullptr>
inline internal::PassedWrapper<T> Passed(T&& scoper) {
return internal::PassedWrapper<T>(std::move(scoper));
}
template <typename T>
inline internal::PassedWrapper<T> Passed(T* scoper) {
return internal::PassedWrapper<T>(std::move(*scoper));
}
// IgnoreResult() is used to adapt a function or callback with a return type to
// one with a void return. This is most useful if you have a function with,
// say, a pesky ignorable bool return that you want to use with PostTask or
// something else that expects a callback with a void return.
//
// EXAMPLE OF IgnoreResult():
//
// int DoSomething(int arg) { cout << arg << endl; }
//
// // Assign to a callback with a void return type.
// OnceCallback<void(int)> cb = BindOnce(IgnoreResult(&DoSomething));
// std::move(cb).Run(1); // Prints "1".
//
// // Prints "2" on |ml|.
// ml->PostTask(FROM_HERE, BindOnce(IgnoreResult(&DoSomething), 2));
template <typename T>
inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
return internal::IgnoreResultHelper<T>(std::move(data));
}
#if BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
// RetainBlock() is used to adapt an Objective-C block when Automated Reference
// Counting (ARC) is disabled. This is unnecessary when ARC is enabled, as the
// BindOnce and BindRepeating already support blocks then.
//
// EXAMPLE OF RetainBlock():
//
// // Wrap the block and bind it to a callback.
// OnceCallback<void(int)> cb =
// BindOnce(RetainBlock(^(int n) { NSLog(@"%d", n); }));
// std::move(cb).Run(1); // Logs "1".
template <typename R, typename... Args>
base::mac::ScopedBlock<R (^)(Args...)> RetainBlock(R (^block)(Args...)) {
return base::mac::ScopedBlock<R (^)(Args...)>(block,
base::scoped_policy::RETAIN);
}
#endif // BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
} // namespace base
#include "base/functional/bind.h"
#endif // BASE_BIND_H_

View File

@ -1,337 +1,12 @@
// Copyright 2012 The Chromium Authors
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// NOTE: Header files that do not require the full definition of
// base::{Once,Repeating}Callback or base::{Once,Repeating}Closure should
// #include "base/callback_forward.h" instead of this file.
// TODO(https://crbug.com/1364441): Temporary forwarding header.
#ifndef BASE_CALLBACK_H_
#define BASE_CALLBACK_H_
#include <stddef.h>
#include <utility>
#include "base/bind.h"
#include "base/callback_forward.h" // IWYU pragma: export
#include "base/callback_internal.h"
#include "base/check.h"
#include "base/functional/function_ref.h"
#include "base/notreached.h"
#include "base/types/always_false.h"
// -----------------------------------------------------------------------------
// Usage documentation
// -----------------------------------------------------------------------------
//
// Overview:
// A callback is similar in concept to a function pointer: it wraps a runnable
// object such as a function, method, lambda, or even another callback, allowing
// the runnable object to be invoked later via the callback object.
//
// Unlike function pointers, callbacks are created with base::BindOnce() or
// base::BindRepeating() and support partial function application.
//
// A base::OnceCallback may be Run() at most once; a base::RepeatingCallback may
// be Run() any number of times. |is_null()| is guaranteed to return true for a
// moved-from callback.
//
// // The lambda takes two arguments, but the first argument |x| is bound at
// // callback creation.
// base::OnceCallback<int(int)> cb = base::BindOnce([] (int x, int y) {
// return x + y;
// }, 1);
// // Run() only needs the remaining unbound argument |y|.
// printf("1 + 2 = %d\n", std::move(cb).Run(2)); // Prints 3
// printf("cb is null? %s\n",
// cb.is_null() ? "true" : "false"); // Prints true
// std::move(cb).Run(2); // Crashes since |cb| has already run.
//
// Callbacks also support cancellation. A common use is binding the receiver
// object as a WeakPtr<T>. If that weak pointer is invalidated, calling Run()
// will be a no-op. Note that |IsCancelled()| and |is_null()| are distinct:
// simply cancelling a callback will not also make it null.
//
// See //docs/callback.md for the full documentation.
namespace base {
namespace internal {
struct NullCallbackTag {
template <typename Signature>
struct WithSignature {};
};
struct DoNothingCallbackTag {
template <typename Signature>
struct WithSignature {};
};
} // namespace internal
template <typename R, typename... Args>
class OnceCallback<R(Args...)> : public internal::CallbackBase {
public:
using ResultType = R;
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingType<Args>...);
constexpr OnceCallback() = default;
OnceCallback(std::nullptr_t) = delete;
constexpr OnceCallback(internal::NullCallbackTag) : OnceCallback() {}
constexpr OnceCallback& operator=(internal::NullCallbackTag) {
*this = OnceCallback();
return *this;
}
constexpr OnceCallback(internal::NullCallbackTag::WithSignature<RunType>)
: OnceCallback(internal::NullCallbackTag()) {}
constexpr OnceCallback& operator=(
internal::NullCallbackTag::WithSignature<RunType>) {
*this = internal::NullCallbackTag();
return *this;
}
constexpr OnceCallback(internal::DoNothingCallbackTag)
: OnceCallback(BindOnce([](Args... args) {})) {}
constexpr OnceCallback& operator=(internal::DoNothingCallbackTag) {
*this = BindOnce([](Args... args) {});
return *this;
}
constexpr OnceCallback(internal::DoNothingCallbackTag::WithSignature<RunType>)
: OnceCallback(internal::DoNothingCallbackTag()) {}
constexpr OnceCallback& operator=(
internal::DoNothingCallbackTag::WithSignature<RunType>) {
*this = internal::DoNothingCallbackTag();
return *this;
}
explicit OnceCallback(internal::BindStateBase* bind_state)
: internal::CallbackBase(bind_state) {}
OnceCallback(const OnceCallback&) = delete;
OnceCallback& operator=(const OnceCallback&) = delete;
OnceCallback(OnceCallback&&) noexcept = default;
OnceCallback& operator=(OnceCallback&&) noexcept = default;
OnceCallback(RepeatingCallback<RunType> other)
: internal::CallbackBase(std::move(other)) {}
OnceCallback& operator=(RepeatingCallback<RunType> other) {
static_cast<internal::CallbackBase&>(*this) = std::move(other);
return *this;
}
R Run(Args... args) const & {
static_assert(!sizeof(*this),
"OnceCallback::Run() may only be invoked on a non-const "
"rvalue, i.e. std::move(callback).Run().");
NOTREACHED();
}
R Run(Args... args) && {
// Move the callback instance into a local variable before the invocation,
// which ensures the internal state is cleared after the invocation.
// It's not safe to touch |this| after the invocation, since running the
// bound function may destroy |this|.
OnceCallback cb = std::move(*this);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
return f(cb.bind_state_.get(), std::forward<Args>(args)...);
}
// Then() returns a new OnceCallback that receives the same arguments as
// |this|, and with the return type of |then|. The returned callback will:
// 1) Run the functor currently bound to |this| callback.
// 2) Run the |then| callback with the result from step 1 as its single
// argument.
// 3) Return the value from running the |then| callback.
//
// Since this method generates a callback that is a replacement for `this`,
// `this` will be consumed and reset to a null callback to ensure the
// originally-bound functor can be run at most once.
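// A minimal usage sketch (illustrative, not part of the original header;
// NumberToString() comes from base/strings/string_number_conversions.h):
//
//   OnceCallback<int(int)> doubler =
//       BindOnce([](int x) { return 2 * x; });
//   OnceCallback<std::string(int)> format =
//       BindOnce([](int v) { return NumberToString(v); });
//   std::move(doubler).Then(std::move(format)).Run(21);  // Returns "42".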
template <typename ThenR, typename... ThenArgs>
OnceCallback<ThenR(Args...)> Then(OnceCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindOnce(
internal::ThenHelper<
OnceCallback, OnceCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
// This overload is required; even though RepeatingCallback is implicitly
// convertible to OnceCallback, that conversion will not be used when matching
// for template argument deduction.
template <typename ThenR, typename... ThenArgs>
OnceCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindOnce(
internal::ThenHelper<
OnceCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::OnceCallback to base::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindOnce() is not necessary with base::FunctionRef; is it "
"possible to use a capturing lambda directly? If not, please bring up "
"this use case on #cxx (Slack) or cxx@chromium.org.");
}
};
template <typename R, typename... Args>
class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
public:
using ResultType = R;
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingType<Args>...);
constexpr RepeatingCallback() = default;
RepeatingCallback(std::nullptr_t) = delete;
constexpr RepeatingCallback(internal::NullCallbackTag)
: RepeatingCallback() {}
constexpr RepeatingCallback& operator=(internal::NullCallbackTag) {
*this = RepeatingCallback();
return *this;
}
constexpr RepeatingCallback(internal::NullCallbackTag::WithSignature<RunType>)
: RepeatingCallback(internal::NullCallbackTag()) {}
constexpr RepeatingCallback& operator=(
internal::NullCallbackTag::WithSignature<RunType>) {
*this = internal::NullCallbackTag();
return *this;
}
constexpr RepeatingCallback(internal::DoNothingCallbackTag)
: RepeatingCallback(BindRepeating([](Args... args) {})) {}
constexpr RepeatingCallback& operator=(internal::DoNothingCallbackTag) {
*this = BindRepeating([](Args... args) {});
return *this;
}
constexpr RepeatingCallback(
internal::DoNothingCallbackTag::WithSignature<RunType>)
: RepeatingCallback(internal::DoNothingCallbackTag()) {}
constexpr RepeatingCallback& operator=(
internal::DoNothingCallbackTag::WithSignature<RunType>) {
*this = internal::DoNothingCallbackTag();
return *this;
}
explicit RepeatingCallback(internal::BindStateBase* bind_state)
: internal::CallbackBaseCopyable(bind_state) {}
// Copyable and movable.
RepeatingCallback(const RepeatingCallback&) = default;
RepeatingCallback& operator=(const RepeatingCallback&) = default;
RepeatingCallback(RepeatingCallback&&) noexcept = default;
RepeatingCallback& operator=(RepeatingCallback&&) noexcept = default;
bool operator==(const RepeatingCallback& other) const {
return EqualsInternal(other);
}
bool operator!=(const RepeatingCallback& other) const {
return !operator==(other);
}
R Run(Args... args) const & {
// Keep `bind_state_` alive at least until after the invocation to ensure
// all bound `Unretained` arguments remain protected by MiraclePtr.
auto bind_state_protector = this->bind_state_;
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
return f(this->bind_state_.get(), std::forward<Args>(args)...);
}
R Run(Args... args) && {
// Move the callback instance into a local variable before the invocation,
// which ensures the internal state is cleared after the invocation.
// It's not safe to touch |this| after the invocation, since running the
// bound function may destroy |this|.
RepeatingCallback cb = std::move(*this);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
return f(std::move(cb).bind_state_.get(), std::forward<Args>(args)...);
}
// Then() returns a new RepeatingCallback that receives the same arguments as
// |this|, and with the return type of |then|. The
// returned callback will:
// 1) Run the functor currently bound to |this| callback.
// 2) Run the |then| callback with the result from step 1 as its single
// argument.
// 3) Return the value from running the |then| callback.
//
// If called on an rvalue (e.g. std::move(cb).Then(...)), this method
// generates a callback that is a replacement for `this`. Therefore, `this`
// will be consumed and reset to a null callback to ensure the
// originally-bound functor will be run at most once.
template <typename ThenR, typename... ThenArgs>
RepeatingCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) const& {
CHECK(then);
return BindRepeating(
internal::ThenHelper<
RepeatingCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
*this, std::move(then));
}
template <typename ThenR, typename... ThenArgs>
RepeatingCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindRepeating(
internal::ThenHelper<
RepeatingCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::RepeatingCallback to base::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindRepeating() is not necessary with base::FunctionRef; "
"is it possible to use a capturing lambda directly? If not, please "
"bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
};
} // namespace base
#include "base/functional/callback.h"
#endif // BASE_CALLBACK_H_

View File

@ -1,24 +1,12 @@
// Copyright 2011 The Chromium Authors
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// TODO(https://crbug.com/1364441): Temporary forwarding header.
#ifndef BASE_CALLBACK_FORWARD_H_
#define BASE_CALLBACK_FORWARD_H_
namespace base {
template <typename Signature>
class OnceCallback;
template <typename Signature>
class RepeatingCallback;
// Syntactic sugar to make OnceCallback<void()> and RepeatingCallback<void()>
// easier to declare since they will be used in a lot of APIs with delayed
// execution.
using OnceClosure = OnceCallback<void()>;
using RepeatingClosure = RepeatingCallback<void()>;
} // namespace base
#include "base/functional/callback_forward.h"
#endif // BASE_CALLBACK_FORWARD_H_

View File

@ -1,191 +1,12 @@
// Copyright 2012 The Chromium Authors
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This defines helpful methods for dealing with Callbacks. Because Callbacks
// are implemented using templates, with a class per callback signature, adding
// methods to Callback<> itself is unattractive (lots of extra code gets
// generated). Instead, consider adding methods here.
//
// TODO(https://crbug.com/1364441): Temporary forwarding header.
#ifndef BASE_CALLBACK_HELPERS_H_
#define BASE_CALLBACK_HELPERS_H_
#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
namespace base {
namespace internal {
template <typename T>
struct IsBaseCallbackImpl : std::false_type {};
template <typename R, typename... Args>
struct IsBaseCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {};
template <typename R, typename... Args>
struct IsBaseCallbackImpl<RepeatingCallback<R(Args...)>> : std::true_type {};
template <typename T>
struct IsOnceCallbackImpl : std::false_type {};
template <typename R, typename... Args>
struct IsOnceCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {};
} // namespace internal
// IsBaseCallback<T>::value is true when T is any of the Closure or Callback
// family of types.
template <typename T>
using IsBaseCallback = internal::IsBaseCallbackImpl<std::decay_t<T>>;
// IsOnceCallback<T>::value is true when T is a OnceClosure or OnceCallback
// type.
template <typename T>
using IsOnceCallback = internal::IsOnceCallbackImpl<std::decay_t<T>>;
// SFINAE friendly enabler allowing to overload methods for both Repeating and
// OnceCallbacks.
//
// Usage:
// template <template <typename> class CallbackType,
// ... other template args ...,
// typename = EnableIfIsBaseCallback<CallbackType>>
// void DoStuff(CallbackType<...> cb, ...);
template <template <typename> class CallbackType>
using EnableIfIsBaseCallback =
std::enable_if_t<IsBaseCallback<CallbackType<void()>>::value>;
namespace internal {
template <typename... Args>
class OnceCallbackHolder final {
public:
OnceCallbackHolder(OnceCallback<void(Args...)> callback,
bool ignore_extra_runs)
: callback_(std::move(callback)), ignore_extra_runs_(ignore_extra_runs) {
DCHECK(callback_);
}
OnceCallbackHolder(const OnceCallbackHolder&) = delete;
OnceCallbackHolder& operator=(const OnceCallbackHolder&) = delete;
void Run(Args... args) {
if (subtle::NoBarrier_AtomicExchange(&has_run_, 1)) {
CHECK(ignore_extra_runs_) << "Both OnceCallbacks returned by "
"base::SplitOnceCallback() were run. "
"At most one of the pair should be run.";
return;
}
DCHECK(callback_);
std::move(callback_).Run(std::forward<Args>(args)...);
}
private:
volatile subtle::Atomic32 has_run_ = 0;
base::OnceCallback<void(Args...)> callback_;
const bool ignore_extra_runs_;
};
} // namespace internal
// Wraps the given OnceCallback and returns two OnceCallbacks with an identical
// signature. On the first invocation of either returned callback, the original
// callback is invoked. Invoking the remaining callback results in a crash.
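// Usage sketch (illustrative; |done_callback| and the Post helpers are
// hypothetical):
//
//   auto split = base::SplitOnceCallback(std::move(done_callback));
//   PostToWorkerThread(std::move(split.first));
//   PostToReplyHandler(std::move(split.second));
//
// Whichever copy runs first forwards to the original; running the second
// one afterwards crashes, as described above.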
template <typename... Args>
std::pair<OnceCallback<void(Args...)>, OnceCallback<void(Args...)>>
SplitOnceCallback(OnceCallback<void(Args...)> callback) {
if (!callback) {
// Empty input begets two empty outputs.
return std::make_pair(OnceCallback<void(Args...)>(),
OnceCallback<void(Args...)>());
}
using Helper = internal::OnceCallbackHolder<Args...>;
auto wrapped_once = base::BindRepeating(
&Helper::Run, std::make_unique<Helper>(std::move(callback),
/*ignore_extra_runs=*/false));
return std::make_pair(wrapped_once, wrapped_once);
}
// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
// that the Closure is executed no matter how the current scope exits.
// If you are looking for "ScopedCallback", "CallbackRunner", or
// "CallbackScoper" this is the class you want.
class BASE_EXPORT ScopedClosureRunner {
public:
ScopedClosureRunner();
explicit ScopedClosureRunner(OnceClosure closure);
ScopedClosureRunner(ScopedClosureRunner&& other);
// Runs the current closure if it's set, then replaces it with the closure
// from |other|. This is akin to how unique_ptr frees the contained pointer in
// its move assignment operator. If you need to explicitly avoid running any
// current closure, use ReplaceClosure().
ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
~ScopedClosureRunner();
explicit operator bool() const { return !!closure_; }
// Calls the current closure and resets it, so it won't be called again.
void RunAndReset();
// Replaces the closure with the new one, releasing the old one without calling it.
void ReplaceClosure(OnceClosure closure);
// Releases the Closure without calling it.
[[nodiscard]] OnceClosure Release();
private:
OnceClosure closure_;
};
// Returns a placeholder type that will implicitly convert into a null callback,
// similar to how absl::nullopt / nullptr work in conjunction with
// absl::optional and various smart pointer types.
constexpr auto NullCallback() {
return internal::NullCallbackTag();
}
// Returns a placeholder type that will implicitly convert into a callback that
// does nothing, similar to how absl::nullopt / nullptr work in conjunction
// with absl::optional and various smart pointer types.
constexpr auto DoNothing() {
return internal::DoNothingCallbackTag();
}
// Similar to the above, but with a type hint. Useful for disambiguating
// among multiple function overloads that take callbacks with different
// signatures:
//
// void F(base::OnceCallback<void()> callback); // 1
// void F(base::OnceCallback<void(int)> callback); // 2
//
// F(base::NullCallbackAs<void()>()); // calls 1
// F(base::DoNothingAs<void(int)>()); // calls 2
template <typename Signature>
constexpr auto NullCallbackAs() {
return internal::NullCallbackTag::WithSignature<Signature>();
}
template <typename Signature>
constexpr auto DoNothingAs() {
return internal::DoNothingCallbackTag::WithSignature<Signature>();
}
// Useful for creating a Closure that will delete a pointer when invoked. Only
// use this when necessary. In most cases MessageLoop::DeleteSoon() is a better
// fit.
template <typename T>
void DeletePointer(T* obj) {
delete obj;
}
} // namespace base
#include "base/functional/callback_helpers.h"
#endif // BASE_CALLBACK_HELPERS_H_

View File

@ -49,9 +49,9 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_internal.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/functional/callback_internal.h"
#include "base/memory/weak_ptr.h"
namespace base {

View File

@ -99,6 +99,10 @@ class BASE_EXPORT CheckError {
LogMessage* const log_message_;
};
#define CHECK_FUNCTION_IMPL(check_function, condition) \
LAZY_CHECK_STREAM(check_function(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(DCHECK_IS_CONFIGURABLE)
@ -119,31 +123,22 @@ class BASE_EXPORT CheckError {
#else
#define CHECK(condition) \
LAZY_CHECK_STREAM( \
::logging::CheckError::Check(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#define CHECK_WILL_STREAM() true
#define PCHECK(condition) \
LAZY_CHECK_STREAM( \
::logging::CheckError::PCheck(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#define CHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::Check, condition)
#define PCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck, condition)
#endif
#if DCHECK_IS_ON()
#define DCHECK(condition) \
LAZY_CHECK_STREAM( \
::logging::CheckError::DCheck(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#define DPCHECK(condition) \
LAZY_CHECK_STREAM( \
::logging::CheckError::DPCheck(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#define DCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::DCheck, condition)
#define DPCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::DPCheck, condition)
#else

View File

@ -140,8 +140,7 @@ class CheckOpResult {
char* message_ = nullptr;
};
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if !CHECK_WILL_STREAM()
// Discard log strings to reduce code bloat.
#define CHECK_OP(name, op, val1, val2) CHECK((val1)op(val2))

View File

@ -510,6 +510,7 @@ void CommandLine::ParseFromString(StringPieceType command_line) {
if (downlevel_shell32_dll)
::FreeLibrary(downlevel_shell32_dll);
}
#endif // BUILDFLAG(IS_WIN)
void CommandLine::AppendSwitchesAndArguments(
@ -650,6 +651,7 @@ void CommandLine::ParseAsSingleArgument(
single_arg_switch_position + single_arg_switch.length() + 1;
if (arg_position >= raw_command_line_string_.length())
return;
has_single_argument_switch_ = true;
const StringPieceType arg = raw_command_line_string_.substr(arg_position);
if (!arg.empty()) {
AppendArgNative(arg);

View File

@ -208,6 +208,11 @@ class BASE_EXPORT CommandLine {
// Initialize by parsing the given command line string.
// The program name is assumed to be the first item in the string.
void ParseFromString(StringPieceType command_line);
// Returns true if the command line had the --single-argument switch, and
// thus likely came from a Windows shell registration. This is only set if the
// command line is parsed, and is not changed after it is parsed.
bool HasSingleArgumentSwitch() const { return has_single_argument_switch_; }
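// For example (illustrative), a shell-registration command line such as
//   app.exe --single-argument C:\My Downloads\file.txt
// yields one argument containing the full path, spaces included, and
// HasSingleArgumentSwitch() returns true.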
#endif
// Sets a delegate that's called when we encounter a duplicate switch
@ -247,6 +252,11 @@ class BASE_EXPORT CommandLine {
// ParseFromString(). Empty if this command line was not parsed from a string,
// or if ParseFromString() has finished executing.
StringPieceType raw_command_line_string_;
// Set to true if the command line had --single-argument when initially
// parsed. It does not change if the command line mutates after initial
// parsing.
bool has_single_argument_switch_ = false;
#endif
// The singleton CommandLine representing the current process's command line.

View File

@ -406,9 +406,13 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#define GSL_POINTER
#endif
// Adds the "logically_const" tag to a symbol's mangled name, which can be
// recognized by the "Mutable Constants" check
// (https://chromium.googlesource.com/chromium/src/+/main/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants).
// Adds the "logically_const" tag to a symbol's mangled name. The "Mutable
// Constants" check [1] detects instances of constants that aren't in .rodata,
// e.g. due to a missing `const`. Using this tag suppresses the check for this
// symbol, allowing it to live outside .rodata without a warning.
//
// [1]:
// https://crsrc.org/c/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants
#if defined(COMPILER_GCC) || defined(__clang__)
#define LOGICALLY_CONST [[gnu::abi_tag("logically_const")]]
#else

View File

@ -11,7 +11,6 @@
#include <type_traits>
#include <utility>
#include "base/as_const.h"
#include "base/check.h"
#include "base/containers/vector_buffer.h"
#include "base/dcheck_is_on.h"
@ -529,11 +528,11 @@ class circular_deque {
return buffer_[i - right_size];
}
value_type& at(size_type i) {
return const_cast<value_type&>(base::as_const(*this).at(i));
return const_cast<value_type&>(std::as_const(*this).at(i));
}
value_type& operator[](size_type i) {
return const_cast<value_type&>(base::as_const(*this)[i]);
return const_cast<value_type&>(std::as_const(*this)[i]);
}
const value_type& operator[](size_type i) const { return at(i); }

View File

@ -12,7 +12,6 @@
#include <type_traits>
#include <utility>
#include "base/as_const.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/functional/not_fn.h"
@ -951,7 +950,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(const K& key)
-> iterator {
return const_cast_it(base::as_const(*this).find(key));
return const_cast_it(std::as_const(*this).find(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@ -974,7 +973,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
const K& key) -> std::pair<iterator, iterator> {
auto res = base::as_const(*this).equal_range(key);
auto res = std::as_const(*this).equal_range(key);
return {const_cast_it(res.first), const_cast_it(res.second)};
}
@ -995,7 +994,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const K& key) -> iterator {
return const_cast_it(base::as_const(*this).lower_bound(key));
return const_cast_it(std::as_const(*this).lower_bound(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@ -1016,7 +1015,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const K& key) -> iterator {
return const_cast_it(base::as_const(*this).upper_bound(key));
return const_cast_it(std::as_const(*this).upper_bound(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
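The `template <typename K>` form of these lookups enables heterogeneous lookup: any key-like type the (transparent) comparator accepts can be passed without materializing a temporary Key. An illustrative sketch with a standard container rather than flat_tree itself:

#include <map>
#include <string>
#include <string_view>

std::map<std::string, int, std::less<>> counts;  // std::less<> is transparent.

int Count(std::string_view name) {
  // No temporary std::string is constructed for the lookup.
  auto it = counts.find(name);
  return it == counts.end() ? 0 : it->second;
}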

View File

@ -300,6 +300,8 @@ void CPU::Initialize(bool require_branding) {
has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
has_pku_ = (cpu_info7[2] & 0x00000008) != 0;
}
// Get the brand string of the cpu.
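The new mask matches CPUID leaf 7 (subleaf 0), ECX bit 3, which reports PKU (protection keys for user-mode pages). A standalone sketch of the same check, assuming an x86 target and the GCC/Clang <cpuid.h> intrinsic:

#include <cpuid.h>  // GCC/Clang x86 only.

bool HasPku() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    return false;                  // CPUID leaf 7 is not supported.
  return (ecx & (1u << 3)) != 0;   // ECX bit 3: PKU. Same mask as 0x00000008.
}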

View File

@ -102,6 +102,13 @@ class BASE_EXPORT CPU final {
constexpr bool has_bti() const { return false; }
#endif
#if defined(ARCH_CPU_X86_FAMILY)
// Memory protection key support for user-mode pages
bool has_pku() const { return has_pku_; }
#else
constexpr bool has_pku() const { return false; }
#endif
#if defined(ARCH_CPU_X86_FAMILY)
IntelMicroArchitecture GetIntelMicroArchitecture() const;
#endif
@ -192,6 +199,9 @@ class BASE_EXPORT CPU final {
#if defined(ARCH_CPU_ARM_FAMILY)
bool has_mte_ = false; // Armv8.5-A MTE (Memory Tagging Extension)
bool has_bti_ = false; // Armv8.5-A BTI (Branch Target Identification)
#endif
#if defined(ARCH_CPU_X86_FAMILY)
bool has_pku_ = false;
#endif
bool has_non_stop_time_stamp_counter_ = false;
bool is_running_in_vm_ = false;
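Callers would query the new capability through the accessor; a minimal usage sketch:

base::CPU cpu;
if (cpu.has_pku()) {
  // Memory protection keys (PKU) are available on this x86 CPU.
}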

View File

@ -19,8 +19,9 @@ namespace {
// Whether to enable a series of optimizations that reduce total CPU
// utilization.
constexpr Feature kReduceCpuUtilization{"ReduceCpuUtilization",
FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kReduceCpuUtilization,
"ReduceCpuUtilization",
FEATURE_DISABLED_BY_DEFAULT);
class CpuReductionExperimentSubSampler {
public:

View File

@ -0,0 +1,21 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define BASE_CXX20_IS_CONSTANT_EVALUATED_H_
namespace base {
// Implementation of C++20's std::is_constant_evaluated.
//
// References:
// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
// - https://wg21.link/meta.const.eval
constexpr bool is_constant_evaluated() noexcept {
return __builtin_is_constant_evaluated();
}
} // namespace base
#endif // BASE_CXX20_IS_CONSTANT_EVALUATED_H_
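The usual application is to branch between a constant-expression-safe path and a faster runtime path; a short sketch:

#include <cstddef>
#include <cstring>

#include "base/cxx20_is_constant_evaluated.h"

constexpr std::size_t Length(const char* s) {
  if (base::is_constant_evaluated()) {
    std::size_t n = 0;  // Plain loop, legal in constant evaluation.
    while (s[n] != '\0')
      ++n;
    return n;
  }
  return std::strlen(s);  // Runtime: defer to the libc routine.
}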

View File

@ -9,9 +9,10 @@
namespace base {
// Simplified C++14 implementation of C++20's std::to_address.
// Note: This does not consider specializations of pointer_traits<>::to_address,
// since that member function may only be present in C++20 and later.
// Implementation of C++20's std::to_address.
// Note: This does consider specializations of pointer_traits<>::to_address,
// even though it's a C++20 member function, because CheckedContiguousIterator
// specializes pointer_traits<> with a to_address() member.
//
// Reference: https://wg21.link/pointer.conversion#lib:to_address
template <typename T>
@ -22,7 +23,13 @@ constexpr T* to_address(T* p) noexcept {
}
template <typename Ptr>
constexpr auto to_address(const Ptr& p) noexcept {
constexpr auto to_address(const Ptr& p) noexcept
-> decltype(std::pointer_traits<Ptr>::to_address(p)) {
return std::pointer_traits<Ptr>::to_address(p);
}
template <typename Ptr, typename... None>
constexpr auto to_address(const Ptr& p, None...) noexcept {
return base::to_address(p.operator->());
}
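to_address() converts a pointer-like object to a raw pointer without dereferencing it, so unlike &*it it is well-defined even for a past-the-end iterator. A usage sketch:

#include <memory>
#include <vector>

void Example() {
  std::unique_ptr<int> p = std::make_unique<int>(42);
  int* raw = base::to_address(p);  // Equivalent to p.get(), via operator->().

  std::vector<int> v = {1, 2, 3};
  int* past_end = base::to_address(v.end());  // OK; &*v.end() would be UB.
  // past_end points one past the last element; do not dereference it.
}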

View File

@ -48,7 +48,7 @@ namespace debug {
//
// static auto* const crash_key = base::debug::AllocateCrashKeyString(
// "name", base::debug::CrashKeySize::Size32);
// base::debug::SetCrashKeyString(crash_key);
// base::debug::SetCrashKeyString(crash_key, "value");
//
// // Do other work before calling `base::debug::DumpWithoutCrashing()` later.
//
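For values that should only be attached while a scope is active, the same header also provides a scoped helper; a short sketch (assuming base::debug::ScopedCrashKeyString, which clears the key on destruction):

static auto* const crash_key = base::debug::AllocateCrashKeyString(
    "operation", base::debug::CrashKeySize::Size32);
{
  base::debug::ScopedCrashKeyString scoped(crash_key, "flush");
  // A crash or DumpWithoutCrashing() here reports "operation: flush".
}  // Key cleared automatically.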

View File

@ -118,36 +118,10 @@ SymbolMap::SymbolMap() {
void SymbolMap::Populate() {
zx_handle_t process = zx_process_self();
// Try to fetch the name of the process' main executable, which was set as the
// name of the |process| kernel object.
// TODO(crbug.com/1131250): Object names can only have up to ZX_MAX_NAME_LEN
// characters, so if we keep hitting problems with truncation, find a way to
// plumb argv[0] through to here instead, e.g. using
// CommandLine::GetProgramName().
char app_name[std::extent<decltype(SymbolMap::Module::name)>()];
zx_status_t status =
zx_object_get_property(process, ZX_PROP_NAME, app_name, sizeof(app_name));
if (status == ZX_OK) {
// The process name may have a process type suffix at the end (e.g.
// "context", "renderer", gpu"), which doesn't belong in the module list.
// Trim the suffix from the name.
for (size_t i = 0; i < std::size(app_name) && app_name[i] != '\0'; ++i) {
if (app_name[i] == ':') {
app_name[i] = 0;
break;
}
}
} else {
DPLOG(WARNING)
<< "Couldn't get name, falling back to 'app' for program name: "
<< status;
strlcat(app_name, "app", sizeof(app_name));
}
// Retrieve the debug info struct.
uintptr_t debug_addr;
status = zx_object_get_property(process, ZX_PROP_PROCESS_DEBUG_ADDR,
&debug_addr, sizeof(debug_addr));
zx_status_t status = zx_object_get_property(
process, ZX_PROP_PROCESS_DEBUG_ADDR, &debug_addr, sizeof(debug_addr));
if (status != ZX_OK) {
DPLOG(ERROR) << "Couldn't get symbol map for process: " << status;
return;
@ -202,7 +176,8 @@ void SymbolMap::Populate() {
strlcpy(next_entry.name, elf_library_name->data(),
elf_library_name->size() + 1);
} else {
StringPiece link_map_name(lmap->l_name[0] ? lmap->l_name : app_name);
StringPiece link_map_name(lmap->l_name[0] ? lmap->l_name
: "<executable>");
// The "module" stack trace annotation doesn't allow for strings which
// resemble paths, so extract the filename portion from |link_map_name|.

View File

@ -51,8 +51,9 @@ const Feature* g_initialized_from_accessor = nullptr;
// TODO(crbug.com/1341292): Remove this global and this feature once the gains
// are measured.
bool g_cache_override_state = false;
const base::Feature kCacheFeatureOverrideState{
"CacheFeatureOverrideState", base::FEATURE_ENABLED_BY_DEFAULT};
BASE_FEATURE(kCacheFeatureOverrideState,
"CacheFeatureOverrideState",
base::FEATURE_ENABLED_BY_DEFAULT);
#if DCHECK_IS_ON()
// Tracks whether the use of base::Feature is allowed for this module.
@ -201,8 +202,9 @@ uint32_t PackFeatureCache(FeatureList::OverrideState override_state,
} // namespace
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
FEATURE_DISABLED_BY_DEFAULT};
BASE_FEATURE(kDCheckIsFatalFeature,
"DcheckIsFatal",
FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
FeatureList::FeatureList() = default;

Some files were not shown because too many files have changed in this diff