Compare commits

No commits in common. "f032d7911c59a6bcc288519ff30c4381e2912375" and "369729a74e7a6847b010d8c90ee7598cbccd99d3" have entirely different histories.

1335 changed files with 23715 additions and 27299 deletions

@@ -10,13 +10,13 @@ defaults:
 shell: bash
 working-directory: src
 env:
-CACHE_EPOCH: 1
+CACHE_EPOCH: 2
 CCACHE_MAXSIZE: 200M
 CCACHE_MAXFILES: 0
 SCCACHE_CACHE_SIZE: 200M
 jobs:
 cache-toolchains-posix:
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 steps:
 - uses: actions/checkout@v3
 - name: Cache toolchains (Linux, OpenWrt, Android)
@@ -49,7 +49,7 @@ jobs:
 wget https://snapshot.debian.org/archive/debian/20220515T152741Z/pool/main/q/qemu/qemu-user-static_7.0%2Bdfsg-6_amd64.deb
 fi
 cache-toolchains-win:
-runs-on: windows-2019
+runs-on: windows-2022
 steps:
 - uses: actions/checkout@v3
 - name: Cache toolchains
@@ -79,7 +79,7 @@ jobs:
 unzip ninja-win.zip -d ~/bin
 fi
 cache-toolchains-mac:
-runs-on: macos-11
+runs-on: macos-12
 steps:
 - uses: actions/checkout@v3
 - uses: actions/cache@v3
@@ -93,7 +93,7 @@ jobs:
 - run: EXTRA_FLAGS='target_cpu="arm64"' ./get-clang.sh
 linux:
 needs: cache-toolchains-posix
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 strategy:
 fail-fast: false
 matrix:
@@ -160,7 +160,7 @@ jobs:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 android:
 needs: cache-toolchains-posix
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 strategy:
 fail-fast: false
 matrix:
@@ -233,7 +233,7 @@ jobs:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 win:
 needs: cache-toolchains-win
-runs-on: windows-2019
+runs-on: windows-2022
 strategy:
 fail-fast: false
 matrix:
@@ -297,7 +297,7 @@ jobs:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 mac:
 needs: cache-toolchains-mac
-runs-on: macos-11
+runs-on: macos-12
 strategy:
 fail-fast: false
 matrix:
@@ -349,7 +349,7 @@ jobs:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 ios:
 needs: cache-toolchains-mac
-runs-on: macos-11
+runs-on: macos-12
 strategy:
 fail-fast: false
 matrix:
@@ -382,7 +382,7 @@ jobs:
 - run: ccache -s
 openwrt:
 needs: cache-toolchains-posix
-runs-on: ubuntu-20.04
+runs-on: ubuntu-22.04
 strategy:
 fail-fast: false
 matrix:

@@ -1 +1 @@
-108.0.5359.94
+107.0.5304.87

@@ -225,7 +225,6 @@ Chaobin Zhang <zhchbin@gmail.com>
 Charles Vaughn <cvaughn@gmail.com>
 Cheng Zhao <zcbenz@gmail.com>
 Cheng Yu <yuzichengcode@gmail.com>
-Cheung Ho <uioptt24@gmail.com>
 Choongwoo Han <cwhan.tunz@gmail.com>
 Chris Greene <cwgreene@amazon.com>
 Chris Harrelson <chrishtr@gmail.com>
@@ -341,7 +340,6 @@ Egor Starkov <egor.starkov@samsung.com>
 Ehsan Akhgari <ehsan.akhgari@gmail.com>
 Ehsan Akhgari <ehsan@mightyapp.com>
 Elan Ruusamäe <elan.ruusamae@gmail.com>
-Emil Suleymanov <emil@esnx.xyz>
 Ergun Erdogmus <erdogmusergun@gmail.com>
 Eric Ahn <byungwook.ahn@gmail.com>
 Eric Huang <ele828@gmail.com>
@@ -438,7 +436,6 @@ Heeyoun Lee <heeyoun.lee@samsung.com>
 Henrique de Carvalho <decarv.henrique@gmail.com>
 Henrique Limas <henrique.ramos.limas@gmail.com>
 Himanshu Joshi <h.joshi@samsung.com>
-Hiroki Oshima <hiroki.oshima@gmail.com>
 Hiroyuki Matsuda <gsittyz@gmail.com>
 Hodol Han <bab6ting@gmail.com>
 Holger Kraus <kraush@amazon.com>
@@ -1319,7 +1316,6 @@ Yi Zhang <yi.y.zhang@intel.com>
 Yizhou Jiang <yizhou.jiang@intel.com>
 Yoav Weiss <yoav@yoav.ws>
 Yoav Zilberberg <yoav.zilberberg@gmail.com>
-Yoichiro Hibara <hibarayoichiro871@gmail.com>
 Yong Ling <yongling@tencent.com>
 Yong Shin <sy3620@gmail.com>
 Yong Wang <ccyongwang@tencent.com>
@@ -1356,7 +1352,6 @@ Zach Bjornson <zbbjornson@gmail.com>
 Zachary Capalbo <zach.geek@gmail.com>
 Zeno Albisser <zeno.albisser@digia.com>
 Zeqin Chen <talonchen@tencent.com>
-Zhang Hao <zhanghao.m@bytedance.com>
 Zhang Hao <15686357310a@gmail.com>
 Zhaoze Zhou <zhaoze.zhou@partner.samsung.com>
 Zheda Chen <zheda.chen@intel.com>
@@ -1404,9 +1399,8 @@ Duck Duck Go, Inc. <*@duckduckgo.com>
 Endless Mobile, Inc. <*@endlessm.com>
 EngFlow, Inc. <*@engflow.com>
 Estimote, Inc. <*@estimote.com>
-Meta Platforms, Inc. <*@fb.com>
-Meta Platforms, Inc. <*@meta.com>
-Meta Platforms, Inc. <*@oculus.com>
+Facebook, Inc. <*@fb.com>
+Facebook, Inc. <*@oculus.com>
 Google Inc. <*@google.com>
 Grammarly, Inc. <*@grammarly.com>
 Hewlett-Packard Development Company, L.P. <*@hp.com>

src/DEPS

@@ -72,9 +72,6 @@ vars = {
 # TODO(ehmaldonado): Remove this once the bug in gclient is fixed.
 'checkout_fuchsia': False,
-
-# Used for downloading the Fuchsia SDK without running hooks.
-'checkout_fuchsia_no_hooks': False,

 # Pull in Android prebuilts build tools so we can create Java xrefs
 'checkout_android_prebuilts_build_tools': False,
@@ -103,9 +100,6 @@ vars = {
 # restricted to Googlers only.
 'checkout_chromium_fsc_test_dependencies': False,
-
-# By default, src-internal checks out //clank.
-'checkout_clank_via_src_internal': True,

 # By default, do not check out Google Benchmark. The library is only used by a
 # few specialized benchmarks that most developers do not interact with. Will
 # be overridden by gclient variables.
@@ -206,12 +200,21 @@ vars = {
 # qemu on linux-arm64 machines.
 'checkout_fuchsia_for_arm64_host': False,

+# Whether to checkout test related data. For compile only builder, we should
+# consider using this flag to save some resources.
+# This is introduced because of crbug.com/1358788.
+'checkout_testdata': True,
+
 # Revision of Crubit (trunk on 2022-08-26). This should typically be the
 # same as the revision specified in CRUBIT_REVISION in
 # tools/rust/update_rust.py. More details and roll instructions can be
 # found in tools/rust/README.md.
 'crubit_revision': '2c34caee7c3b4c2dfbcb0e935efcbc05ebc0f61d',

+# Run 'vpython_common' hook if this is set.
+# TODO(crbug.com/1329052): remove this when we remove .vpython.
+'enable_vpython_common_crbug_1329052': True,
+
 # By default, download the fuchsia sdk from the public sdk directory.
 'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
@@ -250,7 +253,7 @@ vars = {
 # luci-go CIPD package version.
 # Make sure the revision is uploaded by infra-packagers builder.
 # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
-'luci_go': 'git_revision:9f65ffe719f73af390727d369b342c22fa37ea54',
+'luci_go': 'git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78',

 # This can be overridden, e.g. with custom_vars, to build clang from HEAD
 # instead of downloading the prebuilt pinned revision.
@@ -277,20 +280,18 @@ vars = {
 # Use the experimental version of the RTS model
 'checkout_rts_experimental_model': False,

+# By default, do not check out the re-client binaries.
+'checkout_reclient': False,
+
 # Make Dawn skip its standalone dependencies
 'dawn_standalone': False,

 # reclient CIPD package version
-'reclient_version': 're_client_version:0.81.1.0853992-gomaip',
+'reclient_version': 're_client_version:0.78.0.6f1e751-gomaip',

-# Fetch Rust-related packages.
+# Enable fetching Rust-related packages.
 'use_rust': False,

-# Fetch dependencies needed to build Rust toolchain. Not needed if developing
-# Rust code in Chromium; instead enable use_rust. Only use if building the
-# Rust toolchain.
-'checkout_rust_toolchain_deps': False,
-
 'android_git': 'https://android.googlesource.com',
 'aomedia_git': 'https://aomedia.googlesource.com',
 'boringssl_git': 'https://boringssl.googlesource.com',
@@ -304,34 +305,34 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling Skia
 # and whatever else without interference from each other.
-'skia_revision': '7c55be996a81ff9c5c66984c9d4ef85d12a44c8c',
+'skia_revision': '3a8c9bc2f275732b2fd1a566becf421e62fe1f46',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling V8
 # and whatever else without interference from each other.
-'v8_revision': '3155b0d10c058d2a9f1d7bba00ad398b3e03b841',
+'v8_revision': '5a8a4a69ac843a0724f94cc2244b66b51f0f6806',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling ANGLE
 # and whatever else without interference from each other.
-'angle_revision': 'ceec659ac60b0c8ee9d9c602ca1a878ec1d3a88f',
+'angle_revision': 'bbf57e6db2fab3ee4c4336d6c73786b73aff28b2',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling SwiftShader
 # and whatever else without interference from each other.
-'swiftshader_revision': 'b22b1b1f2dddcf5eacc8d2a37e7d27f650e1c1e2',
+'swiftshader_revision': '9e96423f7ed2c22eea3274e3f4c20dd5ca80a18c',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling PDFium
 # and whatever else without interference from each other.
-'pdfium_revision': '9d2c662f557544e5edb74a60b52fb297f4c5dfee',
+'pdfium_revision': '6d0a3d5365d04967d67399617acc16bc7e7efe52',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling BoringSSL
 # and whatever else without interference from each other.
 #
 # Note this revision should be updated with
 # third_party/boringssl/roll_boringssl.py, not roll-dep.
-'boringssl_revision': '1ee71185a2322dc354bee5e5a0abfb1810a27dc6',
+'boringssl_revision': '19009c51bff0706362e824f66a0b189326a1c27d',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling Fuchsia sdk
 # and whatever else without interference from each other.
-'fuchsia_version': 'version:9.20221006.5.1',
+'fuchsia_version': 'version:9.20220915.2.1',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling google-toolbox-for-mac
 # and whatever else without interference from each other.
@@ -355,7 +356,7 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling freetype
 # and whatever else without interference from each other.
-'freetype_revision': '0b62c1e43dc4b0e3c50662aac757e4f7321e5466',
+'freetype_revision': '8e68439a6ffc9e489a70f2c278a016fe15394abf',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling freetype
 # and whatever else without interference from each other.
@@ -363,7 +364,7 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling HarfBuzz
 # and whatever else without interference from each other.
-'harfbuzz_revision': '56c467093598ec559a7148b61e112e9de52b7076',
+'harfbuzz_revision': 'fa471043fccb94444510e3300ac2573297c82137',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling Emoji Segmenter
 # and whatever else without interference from each other.
@@ -375,7 +376,7 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling catapult
 # and whatever else without interference from each other.
-'catapult_revision': '4793433248183dd073e608f655204d4acfdc7193',
+'catapult_revision': 'c06765563619e1d881a26f0e74ef20b69c33c287',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling libFuzzer
 # and whatever else without interference from each other.
@@ -383,7 +384,7 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling devtools-frontend
 # and whatever else without interference from each other.
-'devtools_frontend_revision': '33bb29b551b54b0ac67025e8b3e0ce69352c9504',
+'devtools_frontend_revision': '53692a6c54e3ba615c0641df3ba802bd2e4c2ce7',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling libprotobuf-mutator
 # and whatever else without interference from each other.
@@ -419,11 +420,11 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
-'dawn_revision': 'c84d06e8603ce9c4b5c8d86e42e9ec0acf3bd689',
+'dawn_revision': '6ab02659de463f93a0e723845455bf06d00bb683',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
-'quiche_revision': 'a338ea8277642f6d78022dc8e3aaed182a804413',
+'quiche_revision': 'a80d5908299d3d303b7608207e03320662f4d55f',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling ios_webkit
 # and whatever else without interference from each other.
@@ -437,13 +438,17 @@ vars = {
 # and whatever else without interference from each other.
 'wuffs_revision': 'a8205c2fe7564b12fea81ee028ba670112cc7719',
 # Three lines of non-changing comments so that
+# the commit queue can handle CLs rolling libgifcodec
+# and whatever else without interference from each other.
+'libgifcodec_revision': 'd06d2a6d42baf6c0c91cacc28df2542a911d05fe',
+# Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling libavif
 # and whatever else without interference from each other.
-'libavif_revision': 'de7e6c0d98abcd6843c4a9bf4cee731141dca566',
+'libavif_revision': 'e0954237c40ff75dbc79991ea4774853ad09bed7',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling nearby
 # and whatever else without interference from each other.
-'nearby_revision': '4bd0337c105c502de845ba9501ad6e0350f613b9',
+'nearby_revision': 'bbe77d839756d0207f52a13b371e5daaf273854b',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling securemessage
 # and whatever else without interference from each other.
@@ -455,19 +460,15 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
-'cros_components_revision': 'a0979aacb8744f42ed7abd966a6b0ac7578a73e9',
-# Three lines of non-changing comments so that
-# the commit queue can handle CLs rolling feed
-# and whatever else without interference from each other.
 'resultdb_version': 'git_revision:6cc18e2763e180929d70c786b419c1f8e6bcc66c',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
-'libcxxabi_revision': '9572e56a12c88c011d504a707ca94952be4664f9',
+'libcxxabi_revision': '5c3e02e92ae8bbc1bf1001bd9ef0d76e044ddb86',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
-'libunwind_revision': '1111799723f6a003e6f52202b9bf84387c552081',
+'libunwind_revision': '60a480ee1819266cf8054548454f99838583cd76',
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling feed
 # and whatever else without interference from each other.
@@ -483,18 +484,14 @@ vars = {
 # Three lines of non-changing comments so that
 # the commit queue can handle CLs rolling ffmpeg
 # and whatever else without interference from each other.
-'ffmpeg_revision': 'b9f01c3c54576330b2cf8918c54d5ee5be8faefe',
+'ffmpeg_revision': '3dd1a90db3f7ec955ff5476bd4ee5942f093c6fe',

 # If you change this, also update the libc++ revision in
 # //buildtools/deps_revisions.gni.
-'libcxx_revision': '64d36e572d3f9719c5d75011a718f33f11126851',
+'libcxx_revision': 'e2f63a1a48a3cdcacbfc212236050ca5deeacc30',

 # GN CIPD package version.
-'gn_version': 'git_revision:b9c6c19be95a3863e02f00f1fe403b2502e345b6',
+'gn_version': 'git_revision:fff29c1b3f9703ea449f720fe70fa73575ef24e5',
-
-# ninja CIPD package version.
-# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
-'ninja_version': 'version:2@1.8.2.chromium.3',
 }

 # Only these hosts are allowed for dependencies in this DEPS file.
@@ -569,15 +566,7 @@ deps = {
 }
 ],
 'dep_type': 'cipd',
-},
-
-'src/third_party/ninja': {
-'packages': [
-{
-'package': 'infra/3pp/tools/ninja/${{platform}}',
-'version': Var('ninja_version'),
-}
-],
-'dep_type': 'cipd',
+'condition': 'checkout_reclient',
 },

 'src/third_party/android_rust_toolchain/toolchain': {
 'packages': [
@@ -590,16 +579,6 @@ deps = {
 # TODO(https://crbug.com/1292038): gate this on use_rust as well as host_os.
 'condition': 'host_os == "linux"',
 },
-
-'src/third_party/rust_src/src': {
-'packages': [
-{
-'package': 'chromium/third_party/rust_src',
-'version': 'version:2@2022-09-14',
-},
-],
-'dep_type': 'cipd',
-'condition': 'checkout_rust_toolchain_deps or use_rust',
-},

 # We don't know target_cpu at deps time. At least until there's a universal
 # binary of httpd-php, pull both intel and arm versions in DEPS and then pick
@@ -666,17 +645,6 @@ deps = {
 'version': '29MbwZukN0c7nlUhmVKLU6ecK99dCu-ZwYa3ICqbwB0C',
 },
 ],
-},
-
-'src/third_party/updater/chrome_mac_universal_prod': {
-'dep_type': 'cipd',
-'condition': 'checkout_mac',
-'packages': [
-{
-'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
-'version': 'E3rEUfkgLutRcZKGPJN_yWoC1G-4rTIhzpXGcsUNqCsC',
-},
-],
 },

 'src/third_party/updater/chrome_win_x86': {
@@ -685,7 +653,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/updater/chrome_win_x86',
-'version': 'rqP-urpwa5NOuHhuLVNHyT9d_Psk1xDc8ELSstaIkUUC',
+'version': 'em43A4AKh3oOpCoZpuUNcLGHrCXvvhbN0ZmH496fxJIC',
 },
 ],
 },
@@ -696,7 +664,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/updater/chrome_win_x86_64',
-'version': '7nSN9jjsZ507lwEcJQKUFM_Z2wHmjJmU3nzo1s-r8-UC',
+'version': 'DCttrzwwk19ogJm0ex2eqLSWWtSKjvNYwsvCRQsquhAC',
 },
 ],
 },
@@ -731,7 +699,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/updater/chromium_win_x86',
-'version': '2yELAOdPaRyB3HuFsiecHXc4zcXVupx9cLa9ZAh-Z2wC',
+'version': 'uOq6n_KBa1dlVKW_KFtLKAp0Pm6KyZJegG06QbKLbJUC',
 },
 ],
 },
@@ -742,7 +710,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/updater/chromium_win_x86_64',
-'version': 'vuc_q-ghg3H11b1O-ENURYlDO8hrcpCc4AuN1Expx3gC',
+'version': 'wKB8ION1Ag7GToknXXyg9vDJH3-qYafDygZ68-yy-ccC',
 },
 ],
 },
@@ -751,7 +719,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/chrome/test/data/autofill/captured_sites',
-'version': 'JT0XFwfg09pcNqt5n56Ki9hpRPgfGDSuDd1DRGBSpoYC',
+'version': 'VM29jX57EDTykZu3syuhUrnvzW2Ss_503ebhvWpYDIMC',
 }
 ],
 'condition': 'checkout_chromium_autofill_test_dependencies',
@@ -812,23 +780,17 @@ deps = {
 'src/chrome/test/data/xr/webvr_info':
 Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
-
-'src/clank': {
-'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
-'cf43b2bf3206ff908b2d17be5baba31b7b19f5d3',
-'condition': 'checkout_android and checkout_src_internal and not checkout_clank_via_src_internal',
-},

 'src/docs/website': {
-'url': Var('chromium_git') + '/website.git' + '@' + '7da061134f35c390ac1549a82704a1762f9a5261',
+'url': Var('chromium_git') + '/website.git' + '@' + '7627eaaa9d3d78c19f4d09758492061d70425f0f',
 },

 'src/ios/third_party/earl_grey2/src': {
-'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + 'abd4e95736740cf61d2c63223396e163d3f08415',
+'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '4e3e67586cb1cba8aa317f446ca3d367ec8f618b',
 'condition': 'checkout_ios',
 },

 'src/ios/third_party/edo/src': {
-'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '904c99f0237920066a507129b0266080db3fda11',
+'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'fa262201b8c29d6160d5773eac72f9a4dccd1c92',
 'condition': 'checkout_ios',
 },
@@ -838,7 +800,7 @@ deps = {
 },

 'src/ios/third_party/material_components_ios/src': {
-'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'e7619686aab6b4e438ab51cd3fe03396b2f872c6',
+'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '572585b60a0344363e5bf1808558ac064a0937ed',
 'condition': 'checkout_ios',
 },
@@ -908,7 +870,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/rts/model/linux-amd64',
-'version': 'gjjgFT1JcYKD-SV0nFWRTeGr2kufiafn_rvDI-gFW0QC',
+'version': 'Y3vDcuXgYTosolRwsaziNc_mik48-WLlwUPsUgxeAnkC',
 },
 ],
 'dep_type': 'cipd',
@@ -919,7 +881,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/rts/model/mac-amd64',
-'version': 'xH8MfShB-S7HYkM3gLOUa916ukoEtDJa-8X1bOwfevsC',
+'version': 'G2OtoWUEI0-wVZHRLL7YQd7BKr2Jl4jWkAsWBSkWbZYC',
 },
 ],
 'dep_type': 'cipd',
@@ -930,7 +892,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/rts/model/windows-amd64',
-'version': 'SWCvrm3LQO_Y0XbcVVs0q2CJOVKn0ImNLJ0WPQDKx5YC',
+'version': 'h8mT4J8MUHPy0GQ5Qwo8gBOsgyn5pioUXwRFrffdhWcC',
 },
 ],
 'dep_type': 'cipd',
@@ -998,7 +960,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/androidx',
-'version': 'H4XoDJ7V7LZUIhvV2qwFHWYJoIY4MJkGQK-Q2vv-dq4C',
+'version': 'BbXH8lVPlEbDvjMAQkb2qRAf9NhWCbsGsAHBt3Yv1aEC',
 },
 ],
 'condition': 'checkout_android',
@@ -1064,7 +1026,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_build_tools/lint',
-'version': 'DO1bMH_JFEfZXSkAknIs7AfgNh4IwLtJaeMJTdzfuJwC',
+'version': 'JpRGSVY_dRFR9MLSN_235CfYK4S9jovg0yqlX98htXIC',
 },
 ],
 'condition': 'checkout_android',
@@ -1075,7 +1037,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_build_tools/manifest_merger',
-'version': 'bUREd_PkCqlp2ww6zmyOLGf0jhqgbnf6GT4V1xkAZ10C',
+'version': 'tAZpJUnwhFBJmu1ctEKYMLJp7l3qJufDu7ByW6waq3QC',
 },
 ],
 'condition': 'checkout_android',
@@ -1126,7 +1088,7 @@ deps = {
 Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),

 'src/third_party/content_analysis_sdk/src':
-Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '3d3f3d6f27288d7b0628ae5259238162c5e5ae76',
+Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'c2d02ffaef3e21df65640bb84cde6ac90d45303e',

 'src/third_party/dav1d/libdav1d':
 Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@@ -1213,7 +1175,7 @@ deps = {
 # Tools used when building Chrome for Chrome OS. This affects both the Simple
 # Chrome workflow, as well as the chromeos-chrome ebuild.
 'src/third_party/chromite': {
-'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '6ebc1b94de0dc73bba385f70ddffab9798fd59e5',
+'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '144bfad773686216ea39ba9d359d683c84df7ea9',
 'condition': 'checkout_chromeos',
 },
@@ -1231,7 +1193,7 @@ deps = {
 # For Linux and Chromium OS.
 'src/third_party/cros_system_api': {
-'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '67bca80707449bad87a17de8c937634ff1ab3272',
+'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '6365a697667259c41b193f25b2f05205ebf443d8',
 'condition': 'checkout_linux',
 },
@@ -1241,13 +1203,13 @@ deps = {
 },

 'src/third_party/depot_tools':
-Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '2c0a8c736a59044e4acc7be9e172343adc5c4310',
+Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'ecfab096397df1f8b266cdb380e057dc31dc0952',

 'src/third_party/devtools-frontend/src':
 Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),

 'src/third_party/devtools-frontend-internal': {
-'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '5b416729821b589991d492f0707a087f5a47bb1f',
+'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + 'd49a550e367233aa22a765b86583e02ad655abfd',
 'condition': 'checkout_src_internal',
 },
@@ -1255,10 +1217,10 @@ deps = {
 Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',

 'src/third_party/eigen3/src':
-Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b3bf8d6a13585ff248c079402654647d298de60b',
+Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '34780d8bd13d0af0cf17a22789ef286e8512594d',

 'src/third_party/emoji-metadata/src': {
-'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
+'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
 'condition': 'checkout_chromeos',
 },
@@ -1374,19 +1336,6 @@ deps = {
 'dep_type': 'cipd',
 },
-
-# Exists for rolling the Fuchsia SDK. Check out of the SDK should always
-# rely on the hook running |update_sdk.py| script below.
-'src/third_party/fuchsia-sdk/sdk': {
-'packages': [
-{
-'package': Var('fuchsia_sdk_cipd_prefix') + '${{platform}}',
-'version': Var('fuchsia_version'),
-},
-],
-'condition': 'checkout_fuchsia_no_hooks',
-'dep_type': 'cipd',
-},

 'src/third_party/hamcrest': {
 'packages': [
 {
@@ -1462,7 +1411,7 @@ deps = {
 Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',

 'src/third_party/libaom/source/libaom':
-Var('aomedia_git') + '/aom.git' + '@' + '4ebecefe77953f226e620821fe441e24547a121f',
+Var('aomedia_git') + '/aom.git' + '@' + 'fd35001068a3d726e8184e1721e186dd50fd20e6',

 'src/third_party/libavif/src':
 Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1476,9 +1425,6 @@ deps = {
 'src/third_party/ukey2/src':
 Var('chromium_git') + '/external/github.com/google/ukey2.git' + '@' + Var('ukey2_revision'),
-
-'src/third_party/cros_components':
-Var('chromium_git') + '/external/google3/cros_components.git' + '@' + Var('cros_components_revision'),

 # Userspace interface to kernel DRM services.
 'src/third_party/libdrm/src': {
 'url': Var('chromium_git') + '/chromiumos/third_party/libdrm.git' + '@' + '56f81e6776c1c100c3f627b2c1feb9dcae2aad3c',
@@ -1518,7 +1464,7 @@ deps = {
 },

 'src/third_party/libunwindstack': {
-'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '8740b09bd1f8b81bdba92766afcb9df1d6a1f14e',
+'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '3c86843ae0f8d560ae0d15b92e34ce88cf83057a',
 'condition': 'checkout_android',
 },
@@ -1532,7 +1478,7 @@ deps = {
 Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',

 'src/third_party/libyuv':
-Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '00950840d1c9bcbb3eb6ebc5aac5793e71166c8b',
+Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '3e38ce50589d9319badc0501f96d6c5b2b177472',

 'src/third_party/lighttpd': {
 'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1618,7 +1564,7 @@ deps = {
 },

 'src/third_party/neon_2_sse/src':
-Var('chromium_git') + '/external/github.com/intel/ARM_NEON_2_x86_SSE.git' + '@' + 'a15b489e1222b2087007546b4912e21293ea86ff',
+Var('chromium_git') + '/external/github.com/intel/ARM_NEON_2_x86_SSE.git' + '@' + '8dbe2461c89760ac4b204aa0eafb72413a97957d',

 'src/third_party/netty-tcnative/src': {
 'url': Var('chromium_git') + '/external/netty-tcnative.git' + '@' + '7eeb50be90c9ba0f6afa3375132df63942a0f32d',
@@ -1645,7 +1591,7 @@ deps = {
 Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',

 'src/third_party/openscreen/src':
-Var('chromium_git') + '/openscreen' + '@' + '940f6edf1274146fa1bfbda146b98d6aa16a0887',
+Var('chromium_git') + '/openscreen' + '@' + '9ed6b71cf22ae4558896f2efe254b5ce62d7c7a3',

 'src/third_party/openxr/src': {
 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1662,7 +1608,7 @@ deps = {
 },

 'src/third_party/perfetto':
-Var('android_git') + '/platform/external/perfetto.git' + '@' + '280f0b23c5c8b98248cf0ccf3d011c4fd4bb74f5',
+Var('android_git') + '/platform/external/perfetto.git' + '@' + 'eee5bf9ccb87c0f8cf874974e9c8708491f038df',

 'src/third_party/perl': {
 'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1696,7 +1642,7 @@ deps = {
 },

 'src/third_party/re2/src':
-Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'd2836d1b1c34c4e330a85a1006201db474bf2c8a',
+Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'cc1c9db8bf5155d89d10d65998cdb226f676492c',

 'src/third_party/r8': {
 'packages': [
@@ -1716,7 +1662,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/r8',
-'version': 'Q3q0H5fP-O3El4ZE6Mg7vrySyorEF6YrGFs1gRr_PekC',
+'version': '4DSZMtRXZITDo6YY90ljp92vzRT0eY52akTTR-2Jh7kC',
 },
 ],
 'condition': 'checkout_android',
@@ -1794,27 +1740,27 @@ deps = {
 Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',

 'src/third_party/tflite/src':
-Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '14b52bb67edccf9f250085f83cc0e8aad03824f0',
+Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '620ab3e167b0a64b6732dac16fd0edaf8284cb8e',

 'src/third_party/turbine': {
 'packages': [
 {
 'package': 'chromium/third_party/turbine',
-'version': 'rrpgWQ-uylo8c5IPgUVP464LwcVOmt29MqwsR59O_zkC',
+'version': 'RXO2k7-PyXvbDjiK9EjbsheQfxXme2n0ABNX-MxR0JcC',
 },
 ],
 'condition': 'checkout_android',
 'dep_type': 'cipd',
 },

-'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@f310d85385dfddbe1deeb05deda1045593225710',
+'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3811e73c4b6fff9ffc641719727171079c43972b',

 'src/third_party/vulkan_memory_allocator':
 Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',

 # Display server protocol for Linux.
 'src/third_party/wayland/src': {
-'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland.git' + '@' + 'c7473676b8abc682e820546287044cee3bca9147',
+'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland.git' + '@' + 'e60398b1755bfcdf09f040d3769131fe0d9762fc',
 'condition': 'checkout_linux',
 },
@@ -1836,18 +1782,20 @@ deps = {
 'condition': 'checkout_linux',
 },

+# Keep this to the same revision as the one .vpython3.
 'src/third_party/webdriver/pylib':
-Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'fc5e7e70c098bfb189a9a74746809ad3c5c34e04',
+Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'd0045ec570c1a77612db35d1e92f05e1d27b4d53',

 'src/third_party/webgl/src':
 Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',

 'src/third_party/webgpu-cts/src':
-Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'eba1a78f3d741241b0dbee728561b61e9587a686',
+Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'dec4d77ee3e525c74ae69f77acf3c9c67dd7e7ce',

 'src/third_party/webrtc':
-Var('webrtc_git') + '/src.git' + '@' + '93081d594f7efff72958a79251f53731b99e902b',
+Var('webrtc_git') + '/src.git' + '@' + 'eef098d1c7d50613d8bff2467d674525a9d0c57c',
+
+'src/third_party/libgifcodec':
+Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),

 # Wuffs' canonical repository is at github.com/google/wuffs, but we use
 # Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@@ -1865,7 +1813,7 @@ deps = {
 },

 'src/third_party/xnnpack/src':
-Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'e8f74a9763aa36559980a0c2f37f587794995622',
+Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '8e3d3359f9bec608e09fac1f7054a2a14b1bd73c',

 'src/tools/page_cycler/acid3':
 Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1874,7 +1822,7 @@ deps = {
 'packages': [
 {
 'package': 'skia/tools/goldctl/linux-amd64',
-'version': 'c-P40DdzhvukIRQ1DgesE2cEEU8bTLcd4p_e3LL1--sC',
+'version': 'XXyhhDL9TuGs6KyzXXakE4eaVnpYMXz8DKbnU5Ew3aAC',
 },
 ],
 'dep_type': 'cipd',
@@ -1884,7 +1832,7 @@ deps = {
 'packages': [
 {
 'package': 'skia/tools/goldctl/windows-amd64',
-'version': 'wql7tuE1euGE1rj5JPT6w6ev6KYL3hWzY6HggTHgKZ8C',
+'version': 'DCjjZXPp2-aMyq92DomOu_HdsBScNrumV-n3sIRYCfAC',
 },
 ],
 'dep_type': 'cipd',
@@ -1895,7 +1843,7 @@ deps = {
 'packages': [
 {
 'package': 'skia/tools/goldctl/mac-amd64',
-'version': 'OJJWEma6n1Cw5Ja1DQfdwbOFoFVp6071BB8VjklDcyYC',
+'version': 'FyHlZdO9A-oMbWsU1rxpkz2dOtU4aOPBSJx9YQF_CLoC',
 },
 ],
 'dep_type': 'cipd',
@@ -1906,7 +1854,7 @@ deps = {
 'packages': [
 {
 'package': 'skia/tools/goldctl/mac-arm64',
-'version': '9sTZ5XDqsy_Dj_v4NU3u4fLI_AGANp-zAJ3sof4rkwQC',
+'version': 'K3uVqbmFCdJI0hGDotZSM9kPdXQNp7oIGy7pykJInqAC',
 },
 ],
 'dep_type': 'cipd',
@@ -1917,7 +1865,7 @@ deps = {
 Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),

 'src-internal': {
-'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@a63cd655ad37984fa08e1c95ca73acf55550f10d',
+'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@5dafa2b13dc854da84660cc4b6f5ee6824fb8295',
 'condition': 'checkout_src_internal',
 },
@@ -1936,7 +1884,7 @@ deps = {
 'packages': [
 {
 'package': 'chromeos_internal/apps/eche_app/app',
-'version': '9yLWNtuRvV_dzod1dEYo01glLiFRGZ2yqhtYQapXSm4C',
+'version': 'Ogm1R0DYIBfiOnmn4JGWi5dKm9oZ8t36ukBVWlXFp18C',
 },
 ],
 'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1947,7 +1895,7 @@ deps = {
 'packages': [
 {
 'package': 'chromeos_internal/apps/help_app/app',
-'version': '5MAo0K1bcfWGI4F8OuSplMAOM13HLHbGLL85j8dVU7AC',
+'version': 'FB1uYhNksQfUy3hgfRbQlj2gRCImGE92AyvjStDtLoUC',
 },
 ],
 'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1958,7 +1906,7 @@ deps = {
 'packages': [
 {
 'package': 'chromeos_internal/apps/media_app/app',
-'version': 'HfCwnAI0440kMmt917E1v9QJdzsNuNVfQQ86ehaVDscC',
+'version': 'dSxPTWwzHZVCkFCCq4wKiCq4YbzDZLXlLj8fBVzc0X8C',
 },
 ],
 'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1969,7 +1917,7 @@ deps = {
 'packages': [
 {
 'package': 'chromeos_internal/apps/projector_app/app',
-'version': 'TaHxBUmYiVurXIHHo8Y5mOh7-SEnHbSCW7fn60_Wm54C',
+'version': '_bQcxwT8nluFLpn_zf1IP97Fl5-_MaonRAN3xpowqCoC',
 },
 ],
 'condition': 'checkout_chromeos and checkout_src_internal',
@@ -2504,17 +2452,6 @@ deps = {
 'dep_type': 'cipd',
 },
-
-'src/third_party/android_deps/libs/com_google_android_annotations': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/com_google_android_annotations',
-'version': 'version:2@4.1.1.4.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},

 'src/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework': {
 'packages': [
 {
@@ -2849,7 +2786,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_deps/libs/com_google_code_gson_gson',
-'version': 'version:2@2.9.0.cr1',
+'version': 'version:2@2.8.0.cr1',
 },
 ],
 'condition': 'checkout_android',
@@ -3146,7 +3083,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava_android',
-'version': 'version:2@31.1-android.cr1',
+'version': 'version:2@31.0-android.cr1',
 },
 ],
 'condition': 'checkout_android',
@@ -3190,7 +3127,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_javalite',
-'version': 'version:2@3.21.1.cr1',
+'version': 'version:2@3.19.3.cr1',
 },
 ],
 'condition': 'checkout_android',
@@ -3241,83 +3178,6 @@ deps = {
 'dep_type': 'cipd',
 },
-
-'src/third_party/android_deps/libs/io_grpc_grpc_api': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_api',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_grpc_grpc_binder': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_binder',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_grpc_grpc_context': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_context',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_grpc_grpc_core': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_core',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_grpc_grpc_protobuf_lite': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_protobuf_lite',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_grpc_grpc_stub': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_grpc_grpc_stub',
-'version': 'version:2@1.49.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/io_perfmark_perfmark_api': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/io_perfmark_perfmark_api',
-'version': 'version:2@0.25.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},

 'src/third_party/android_deps/libs/javax_annotation_javax_annotation_api': {
 'packages': [
 {
@@ -3432,18 +3292,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual',
-'version': 'version:2@3.25.0.cr1',
-},
-],
-'condition': 'checkout_android',
-'dep_type': 'cipd',
-},
-
-'src/third_party/android_deps/libs/org_checkerframework_checker_util': {
-'packages': [
-{
-'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_util',
-'version': 'version:2@3.25.0.cr1',
+'version': 'version:2@3.22.1.cr1',
 },
 ],
 'condition': 'checkout_android',
@@ -3465,7 +3314,7 @@ deps = {
 'packages': [
 {
 'package': 'chromium/third_party/android_deps/libs/org_codehaus_mojo_animal_sniffer_annotations',
-'version': 'version:2@1.21.cr1',
+'version': 'version:2@1.17.cr1',
 },
 ],
 'condition': 'checkout_android',
@@ -4289,6 +4138,7 @@ hooks = [
 {
 'name': 'test_fonts',
 'pattern': '.',
+'condition': 'checkout_testdata',
 'action': [ 'python3',
 'src/third_party/depot_tools/download_from_google_storage.py',
 '--no_resume',
@@ -4302,6 +4152,7 @@ hooks = [
 {
 'name': 'opus_test_files',
 'pattern': '.',
+'condition': 'checkout_testdata',
 'action': ['python3',
 'src/third_party/depot_tools/download_from_google_storage.py',
 '--no_auth',
@@ -4351,6 +4202,7 @@ hooks = [
 {
 'name': 'wasm_fuzzer',
 'pattern': '.',
+'condition': 'checkout_testdata',
 'action': [ 'python3',
 'src/third_party/depot_tools/download_from_google_storage.py',
 '--no_resume',
@@ -4507,6 +4359,7 @@ hooks = [
 {
 'name': 'maps_perf_test_load_dataset',
 'pattern': '\\.sha1',
+'condition': 'checkout_testdata',
 'action': [ 'python3',
 'src/third_party/depot_tools/download_from_google_storage.py',
 '--no_resume',
@@ -4552,6 +4405,7 @@ hooks = [
 {
 'name': 'zucchini_testdata',
 'pattern': '.',
+'condition': 'checkout_testdata',
 'action': [ 'python3',
 'src/third_party/depot_tools/download_from_google_storage.py',
 '--no_resume',
@@ -4922,7 +4776,7 @@ hooks = [
 {
 'name': 'Fetch PGO profiles for mac arm',
 'pattern': '.',
-'condition': 'checkout_pgo_profiles and (checkout_mac or checkout_android)',
+'condition': 'checkout_pgo_profiles and checkout_mac',
 'action': [ 'python3',
 'src/tools/update_pgo_profiles.py',
 '--target=mac-arm',
@@ -4975,17 +4829,6 @@ hooks = [
 'condition': 'host_os == "win"',
 'action': ['python3', 'src/build/del_ninja_deps_cache.py'],
 },
-
-# Download test resources for the style perftest.
-{
-'name': 'style_perftest_files',
-'pattern': '.',
-'action': ['python3',
-'src/third_party/depot_tools/download_from_google_storage.py',
-'--no_auth',
-'--quiet',
-'--bucket', 'chromium-style-perftest',
-'-d', 'src/third_party/blink/renderer/core/css/perftest_data'],
-},
 ]

 # Add any corresponding DEPS files from this list to chromium.exclusions in
@ -5002,7 +4845,4 @@ recursedeps = [
'src/third_party/vulkan-deps', 'src/third_party/vulkan-deps',
# src-internal has its own DEPS file to pull additional internal repos # src-internal has its own DEPS file to pull additional internal repos
'src-internal', 'src-internal',
# clank has its own DEPS file. This needs to be enabled only when it is
# removed from src-internal's recursedeps.
#'src/clank',
] ]
View File
@ -31,7 +31,6 @@ import("//build/config/logging.gni")
import("//build/config/nacl/config.gni") import("//build/config/nacl/config.gni")
import("//build/config/profiling/profiling.gni") import("//build/config/profiling/profiling.gni")
import("//build/config/rust.gni") import("//build/config/rust.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/sysroot.gni") import("//build/config/sysroot.gni")
import("//build/config/ui.gni") import("//build/config/ui.gni")
import("//build/rust/mixed_component.gni") import("//build/rust/mixed_component.gni")
@ -199,6 +198,7 @@ mixed_component("base") {
"allocator/dispatcher/reentry_guard.cc", "allocator/dispatcher/reentry_guard.cc",
"allocator/dispatcher/reentry_guard.h", "allocator/dispatcher/reentry_guard.h",
"allocator/dispatcher/subsystem.h", "allocator/dispatcher/subsystem.h",
"as_const.h",
"at_exit.cc", "at_exit.cc",
"at_exit.h", "at_exit.h",
"atomic_ref_count.h", "atomic_ref_count.h",
@ -218,13 +218,17 @@ mixed_component("base") {
"big_endian.cc", "big_endian.cc",
"big_endian.h", "big_endian.h",
"bind.h", "bind.h",
"bind_internal.h",
"bit_cast.h", "bit_cast.h",
"bits.h", "bits.h",
"build_time.cc", "build_time.cc",
"build_time.h", "build_time.h",
"callback.h", "callback.h",
"callback_forward.h", "callback_forward.h",
"callback_helpers.cc",
"callback_helpers.h", "callback_helpers.h",
"callback_internal.cc",
"callback_internal.h",
"callback_list.cc", "callback_list.cc",
"callback_list.h", "callback_list.h",
"cancelable_callback.h", "cancelable_callback.h",
@ -283,7 +287,6 @@ mixed_component("base") {
"cpu_reduction_experiment.h", "cpu_reduction_experiment.h",
"critical_closure.h", "critical_closure.h",
"cxx17_backports.h", "cxx17_backports.h",
"cxx20_is_constant_evaluated.h",
"cxx20_to_address.h", "cxx20_to_address.h",
"dcheck_is_on.h", "dcheck_is_on.h",
"debug/activity_analyzer.cc", "debug/activity_analyzer.cc",
@ -351,14 +354,6 @@ mixed_component("base") {
"files/scoped_temp_dir.cc", "files/scoped_temp_dir.cc",
"files/scoped_temp_dir.h", "files/scoped_temp_dir.h",
"format_macros.h", "format_macros.h",
"functional/bind.h",
"functional/bind_internal.h",
"functional/callback.h",
"functional/callback_forward.h",
"functional/callback_helpers.cc",
"functional/callback_helpers.h",
"functional/callback_internal.cc",
"functional/callback_internal.h",
"functional/function_ref.h", "functional/function_ref.h",
"functional/identity.h", "functional/identity.h",
"functional/invoke.h", "functional/invoke.h",
@ -545,18 +540,12 @@ mixed_component("base") {
"pending_task.h", "pending_task.h",
"pickle.cc", "pickle.cc",
"pickle.h", "pickle.h",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/moving_average.cc", "power_monitor/moving_average.cc",
"power_monitor/moving_average.h", "power_monitor/moving_average.h",
"power_monitor/power_monitor.cc", "power_monitor/power_monitor.cc",
"power_monitor/power_monitor.h", "power_monitor/power_monitor.h",
"power_monitor/power_monitor_device_source.cc", "power_monitor/power_monitor_device_source.cc",
"power_monitor/power_monitor_device_source.h", "power_monitor/power_monitor_device_source.h",
"power_monitor/power_monitor_features.cc",
"power_monitor/power_monitor_features.h",
"power_monitor/power_monitor_source.cc", "power_monitor/power_monitor_source.cc",
"power_monitor/power_monitor_source.h", "power_monitor/power_monitor_source.h",
"power_monitor/power_observer.h", "power_monitor/power_observer.h",
@ -796,7 +785,6 @@ mixed_component("base") {
"task/simple_task_executor.h", "task/simple_task_executor.h",
"task/single_thread_task_executor.cc", "task/single_thread_task_executor.cc",
"task/single_thread_task_executor.h", "task/single_thread_task_executor.h",
"task/single_thread_task_runner.cc",
"task/single_thread_task_runner.h", "task/single_thread_task_runner.h",
"task/single_thread_task_runner_thread_mode.h", "task/single_thread_task_runner_thread_mode.h",
"task/task_executor.cc", "task/task_executor.cc",
@ -962,7 +950,6 @@ mixed_component("base") {
"types/expected.h", "types/expected.h",
"types/expected_internal.h", "types/expected_internal.h",
"types/id_type.h", "types/id_type.h",
"types/optional_ref.h",
"types/optional_util.h", "types/optional_util.h",
"types/pass_key.h", "types/pass_key.h",
"types/strong_alias.h", "types/strong_alias.h",
@ -1083,7 +1070,11 @@ mixed_component("base") {
"message_loop/message_pump_win.cc", "message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h", "message_loop/message_pump_win.h",
"native_library_win.cc", "native_library_win.cc",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_win.cc", "power_monitor/battery_level_provider_win.cc",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/power_monitor_device_source_win.cc", "power_monitor/power_monitor_device_source_win.cc",
"power_monitor/speed_limit_observer_win.cc", "power_monitor/speed_limit_observer_win.cc",
"power_monitor/speed_limit_observer_win.h", "power_monitor/speed_limit_observer_win.h",
@ -1309,7 +1300,11 @@ mixed_component("base") {
"message_loop/message_pump_mac.h", "message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm", "message_loop/message_pump_mac.mm",
"native_library_mac.mm", "native_library_mac.mm",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_mac.mm", "power_monitor/battery_level_provider_mac.mm",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/battery_state_sampler_mac.cc", "power_monitor/battery_state_sampler_mac.cc",
"power_monitor/iopm_power_source_sampling_event_source.cc", "power_monitor/iopm_power_source_sampling_event_source.cc",
"power_monitor/iopm_power_source_sampling_event_source.h", "power_monitor/iopm_power_source_sampling_event_source.h",
@ -1549,6 +1544,9 @@ mixed_component("base") {
"//third_party/abseil-cpp:absl", "//third_party/abseil-cpp:absl",
] ]
# Windows cannot use the nodebug assertion handler because it doesn't support
# weak symbols, which are required to override the default libc++
# implementation.
if (use_custom_libcxx && !is_debug) { if (use_custom_libcxx && !is_debug) {
public_deps += [ ":nodebug_assertion" ] public_deps += [ ":nodebug_assertion" ]
} }
@ -2551,10 +2549,7 @@ buildflag_header("sanitizer_buildflags") {
header = "sanitizer_buildflags.h" header = "sanitizer_buildflags.h"
header_dir = "base" header_dir = "base"
flags = [ flags = [ "IS_HWASAN=$is_hwasan" ]
"IS_HWASAN=$is_hwasan",
"USING_SANITIZER=$using_sanitizer",
]
} }
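The buildflag_header() target above compiles its flags list into a generated C++ header; the left side also emits USING_SANITIZER alongside IS_HWASAN. A minimal consumer sketch, assuming only that the generated header lands at "base/sanitizer_buildflags.h" as the header/header_dir values imply (BUILDFLAG() comes from build/buildflag.h):

#include "base/sanitizer_buildflags.h"  // generated by the target above
#include "build/buildflag.h"

bool IsHwAsanBuild() {
#if BUILDFLAG(IS_HWASAN)
  return true;   // emitted as IS_HWASAN=true at GN time
#else
  return false;
#endif
}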
buildflag_header("tracing_buildflags") { buildflag_header("tracing_buildflags") {
View File
@ -34,14 +34,6 @@ per-file callback_list*=pkasting@chromium.org
per-file feature_list*=asvitkine@chromium.org per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org per-file feature_list*=isherman@chromium.org
# Logging-related changes:
per-file check*=olivierli@chromium.org
per-file check*=pbos@chromium.org
per-file dcheck*=olivierli@chromium.org
per-file dcheck*=pbos@chromium.org
per-file logging*=olivierli@chromium.org
per-file logging*=pbos@chromium.org
# Restricted since rand_util.h also backs the cryptographically secure RNG. # Restricted since rand_util.h also backs the cryptographically secure RNG.
per-file rand_util*=set noparent per-file rand_util*=set noparent
per-file rand_util*=file://ipc/SECURITY_OWNERS per-file rand_util*=file://ipc/SECURITY_OWNERS
View File
@ -20,7 +20,7 @@ def CheckChangeLintsClean(input_api, output_api):
# The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch. # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
# Only process those extensions which are used in Chromium, in directories # Only process those extensions which are used in Chromium, in directories
# that currently lint clean. # that currently lint clean.
CLEAN_CPP_FILES_ONLY = (r'base/win/.*\.(cc|h)$', ) CLEAN_CPP_FILES_ONLY = (r'base[\\/]win[\\/].*\.(cc|h)$', )
source_file_filter = lambda x: input_api.FilterSourceFile( source_file_filter = lambda x: input_api.FilterSourceFile(
x, x,
files_to_check=CLEAN_CPP_FILES_ONLY, files_to_check=CLEAN_CPP_FILES_ONLY,
@ -90,9 +90,9 @@ def _CheckNoTraceEventInclude(input_api, output_api):
r".*\.(h|cc|mm)$", r".*\.(h|cc|mm)$",
] ]
files_to_skip = [ files_to_skip = [
r".*/test/.*", r".*[\\/]test[\\/].*",
r".*/trace_event/.*", r".*[\\/]trace_event[\\/].*",
r".*/tracing/.*", r".*[\\/]tracing[\\/].*",
] ]
locations = _FindLocations(input_api, discouraged_includes, files_to_check, locations = _FindLocations(input_api, discouraged_includes, files_to_check,
@ -123,9 +123,9 @@ def _WarnPbzeroIncludes(input_api, output_api):
r".*\.(h|cc|mm)$", r".*\.(h|cc|mm)$",
] ]
files_to_skip = [ files_to_skip = [
r".*/test/.*", r".*[\\/]test[\\/].*",
r".*/trace_event/.*", r".*[\\/]trace_event[\\/].*",
r".*/tracing/.*", r".*[\\/]tracing[\\/].*",
] ]
locations = _FindLocations(input_api, warn_includes, files_to_check, locations = _FindLocations(input_api, warn_includes, files_to_check,
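The hunks above trade patterns between a forward-slash-only form and the [\\/] character class; the class form matches either path separator, so the presubmit filters also fire on Windows-style paths. An illustrative sketch, with std::regex standing in for Python's re module (not part of the tree):

#include <cassert>
#include <regex>
#include <string>

int main() {
  const std::regex clean_cpp(R"(base[\\/]win[\\/].*\.(cc|h)$)");
  assert(std::regex_search(std::string("base/win/foo.cc"), clean_cpp));    // POSIX separators
  assert(std::regex_search(std::string("base\\win\\foo.cc"), clean_cpp));  // Windows separators
  return 0;
}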
View File
@ -11,27 +11,8 @@
namespace base { namespace base {
namespace features { namespace features {
BASE_FEATURE(kPartitionAllocUnretainedDanglingPtr, const BASE_EXPORT Feature kPartitionAllocDanglingPtr{
"PartitionAllocUnretainedDanglingPtr", "PartitionAllocDanglingPtr", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<UnretainedDanglingPtrMode>::Option
kUnretainedDanglingPtrModeOption[] = {
{UnretainedDanglingPtrMode::kCrash, "crash"},
{UnretainedDanglingPtrMode::kDumpWithoutCrashing,
"dump_without_crashing"},
};
const base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam = {
&kPartitionAllocUnretainedDanglingPtr,
"mode",
UnretainedDanglingPtrMode::kDumpWithoutCrashing,
&kUnretainedDanglingPtrModeOption,
};
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = { constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"}, {DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"}, {DanglingPtrMode::kLogSignature, "log_signature"},
@ -46,46 +27,41 @@ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't // If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly. // disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan, const Feature kPartitionAllocPCScan{"PartitionAllocPCScan",
"PartitionAllocPCScan", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
#endif // defined(PA_ALLOW_PCSCAN) #endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition. // If enabled, PCScan is turned on only for the browser's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanBrowserOnly, const Feature kPartitionAllocPCScanBrowserOnly{
"PartitionAllocPCScanBrowserOnly", "PartitionAllocPCScanBrowserOnly", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan is turned on only for the renderer's malloc partition. // If enabled, PCScan is turned on only for the renderer's malloc partition.
BASE_FEATURE(kPartitionAllocPCScanRendererOnly, const Feature kPartitionAllocPCScanRendererOnly{
"PartitionAllocPCScanRendererOnly", "PartitionAllocPCScanRendererOnly", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, this instance belongs to the Control group of the BackupRefPtr // If enabled, this instance belongs to the Control group of the BackupRefPtr
// binary experiment. // binary experiment.
BASE_FEATURE(kPartitionAllocBackupRefPtrControl, const Feature kPartitionAllocBackupRefPtrControl{
"PartitionAllocBackupRefPtrControl", "PartitionAllocBackupRefPtrControl", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// Use a larger maximum thread cache cacheable bucket size. // Use a larger maximum thread cache cacheable bucket size.
BASE_FEATURE(kPartitionAllocLargeThreadCacheSize, const Feature kPartitionAllocLargeThreadCacheSize{
"PartitionAllocLargeThreadCacheSize", "PartitionAllocLargeThreadCacheSize",
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS) #if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_32_BITS)
// Not unconditionally enabled on 32 bit Android, since it is a // Not unconditionally enabled on 32 bit Android, since it is a more
// more memory-constrained platform. // memory-constrained platform.
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#else #else
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#endif #endif
); };
BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing, const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
"PartitionAllocLargeEmptySlotSpanRing", "PartitionAllocLargeEmptySlotSpanRing", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_FEATURE(kPartitionAllocBackupRefPtr, const Feature kPartitionAllocBackupRefPtr {
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX)) (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
@ -93,7 +69,7 @@ BASE_FEATURE(kPartitionAllocBackupRefPtr,
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#endif #endif
); };
constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
kBackupRefPtrEnabledProcessesOptions[] = { kBackupRefPtrEnabledProcessesOptions[] = {
@ -139,9 +115,8 @@ const base::FeatureParam<bool> kBackupRefPtrAsanEnableInstantiationCheckParam{
// If enabled, switches the bucket distribution to an alternate one. Only one of // If enabled, switches the bucket distribution to an alternate one. Only one of
// these features may be enabled at a time. // these features may be enabled at a time.
BASE_FEATURE(kPartitionAllocUseAlternateDistribution, const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution{
"PartitionAllocUseAlternateDistribution", "PartitionAllocUseAlternateDistribution", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
const base::FeatureParam<AlternateBucketDistributionMode>::Option const base::FeatureParam<AlternateBucketDistributionMode>::Option
kPartitionAllocAlternateDistributionOption[] = { kPartitionAllocAlternateDistributionOption[] = {
{AlternateBucketDistributionMode::kDefault, "default"}, {AlternateBucketDistributionMode::kDefault, "default"},
@ -156,39 +131,34 @@ const base::FeatureParam<AlternateBucketDistributionMode>
// If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not // If enabled, switches PCScan scheduling to a mutator-aware scheduler. Does not
// affect whether PCScan is enabled itself. // affect whether PCScan is enabled itself.
BASE_FEATURE(kPartitionAllocPCScanMUAwareScheduler, const Feature kPartitionAllocPCScanMUAwareScheduler{
"PartitionAllocPCScanMUAwareScheduler", "PartitionAllocPCScanMUAwareScheduler", FEATURE_ENABLED_BY_DEFAULT};
FEATURE_ENABLED_BY_DEFAULT);
// If enabled, PCScan frees unconditionally all quarantined objects. // If enabled, PCScan frees unconditionally all quarantined objects.
// This is a performance testing feature. // This is a performance testing feature.
BASE_FEATURE(kPartitionAllocPCScanImmediateFreeing, const Feature kPartitionAllocPCScanImmediateFreeing{
"PartitionAllocPCScanImmediateFreeing", "PartitionAllocPCScanImmediateFreeing", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// If enabled, PCScan clears eagerly (synchronously) on free(). // If enabled, PCScan clears eagerly (synchronously) on free().
BASE_FEATURE(kPartitionAllocPCScanEagerClearing, const Feature kPartitionAllocPCScanEagerClearing{
"PartitionAllocPCScanEagerClearing", "PartitionAllocPCScanEagerClearing", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// In addition to heap, scan also the stack of the current mutator. // In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning, const Feature kPartitionAllocPCScanStackScanning {
"PartitionAllocPCScanStackScanning", "PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED) #if defined(PA_PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED) #endif // defined(PA_PCSCAN_STACK_SUPPORTED)
); };
BASE_FEATURE(kPartitionAllocDCScan, const Feature kPartitionAllocDCScan{"PartitionAllocDCScan",
"PartitionAllocDCScan", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
// Whether to sort the active slot spans in PurgeMemory(). // Whether to sort the active slot spans in PurgeMemory().
BASE_FEATURE(kPartitionAllocSortActiveSlotSpans, extern const Feature kPartitionAllocSortActiveSlotSpans{
"PartitionAllocSortActiveSlotSpans", "PartitionAllocSortActiveSlotSpans", FEATURE_DISABLED_BY_DEFAULT};
FEATURE_DISABLED_BY_DEFAULT);
} // namespace features } // namespace features
} // namespace base } // namespace base
View File
@ -15,20 +15,12 @@
namespace base { namespace base {
namespace features { namespace features {
extern const BASE_EXPORT Feature kPartitionAllocUnretainedDanglingPtr;
enum class UnretainedDanglingPtrMode {
kCrash,
kDumpWithoutCrashing,
};
extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md // See /docs/dangling_ptr.md
// //
// Usage: // Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash // --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature // --enable-features=PartitionAllocDanglingPtr:mode/log_signature
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr); extern const BASE_EXPORT Feature kPartitionAllocDanglingPtr;
enum class DanglingPtrMode { enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr. // Crash immediately after detecting a dangling raw_ptr.
kCrash, // (default) kCrash, // (default)
@ -46,14 +38,14 @@ extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam; kDanglingPtrModeParam;
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan); extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif // defined(PA_ALLOW_PCSCAN) #endif // defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly); extern const BASE_EXPORT Feature kPartitionAllocPCScanBrowserOnly;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly); extern const BASE_EXPORT Feature kPartitionAllocPCScanRendererOnly;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtrControl); extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtrControl;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeThreadCacheSize); extern const BASE_EXPORT Feature kPartitionAllocLargeThreadCacheSize;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing); extern const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
enum class BackupRefPtrEnabledProcesses { enum class BackupRefPtrEnabledProcesses {
@ -95,7 +87,7 @@ enum class AlternateBucketDistributionMode : uint8_t {
kDenser, kDenser,
}; };
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocBackupRefPtr); extern const BASE_EXPORT Feature kPartitionAllocBackupRefPtr;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses> extern const BASE_EXPORT base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam; kBackupRefPtrEnabledProcessesParam;
extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode> extern const BASE_EXPORT base::FeatureParam<BackupRefPtrMode>
@ -109,13 +101,13 @@ extern const BASE_EXPORT base::FeatureParam<bool>
extern const BASE_EXPORT base::FeatureParam<AlternateBucketDistributionMode> extern const BASE_EXPORT base::FeatureParam<AlternateBucketDistributionMode>
kPartitionAllocAlternateBucketDistributionParam; kPartitionAllocAlternateBucketDistributionParam;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanMUAwareScheduler); extern const BASE_EXPORT Feature kPartitionAllocPCScanMUAwareScheduler;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanStackScanning); extern const BASE_EXPORT Feature kPartitionAllocPCScanStackScanning;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDCScan); extern const BASE_EXPORT Feature kPartitionAllocDCScan;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanImmediateFreeing); extern const BASE_EXPORT Feature kPartitionAllocPCScanImmediateFreeing;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanEagerClearing); extern const BASE_EXPORT Feature kPartitionAllocPCScanEagerClearing;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocSortActiveSlotSpans); extern const BASE_EXPORT Feature kPartitionAllocSortActiveSlotSpans;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocUseAlternateDistribution); extern const BASE_EXPORT Feature kPartitionAllocUseAlternateDistribution;
} // namespace features } // namespace features
} // namespace base } // namespace base
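The two files above capture a migration between two ways of defining base::Feature objects: one revision uses the BASE_FEATURE / BASE_DECLARE_FEATURE macro pair, the other plain Feature globals. A minimal sketch with a made-up feature name, using only the two forms that appear verbatim in the hunks (namespace base::features assumed, as there):

// Macro form, header:
BASE_EXPORT BASE_DECLARE_FEATURE(kMyFeature);
// Macro form, .cc:
BASE_FEATURE(kMyFeature, "MyFeature", FEATURE_DISABLED_BY_DEFAULT);

// Plain-global form, header:
extern const BASE_EXPORT Feature kMyFeature;
// Plain-global form, .cc:
const Feature kMyFeature{"MyFeature", FEATURE_DISABLED_BY_DEFAULT};

// Querying is identical under both forms:
// bool on = FeatureList::IsEnabled(kMyFeature);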
View File
@ -5,7 +5,6 @@
#include "base/allocator/partition_alloc_support.h" #include "base/allocator/partition_alloc_support.h"
#include <array> #include <array>
#include <cinttypes>
#include <cstdint> #include <cstdint>
#include <map> #include <map>
#include <string> #include <string>
@ -15,7 +14,6 @@
#include "base/allocator/partition_allocator/allocation_guard.h" #include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h" #include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h" #include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
@ -24,9 +22,7 @@
#include "base/bind.h" #include "base/bind.h"
#include "base/callback.h" #include "base/callback.h"
#include "base/check.h" #include "base/check.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/stack_trace.h" #include "base/debug/stack_trace.h"
#include "base/debug/task_trace.h"
#include "base/feature_list.h" #include "base/feature_list.h"
#include "base/immediate_crash.h" #include "base/immediate_crash.h"
#include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_functions.h"
@ -486,22 +482,23 @@ void DanglingRawPtrReleasedCrash(uintptr_t id) {
// allowed. In particular, symbolizing and printing the StackTraces may // allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory. // allocate memory.
debug::StackTrace stack_trace_release; debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id); absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (stack_trace_free) { if (stack_trace_free) {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << StringPrintf(
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" "Detected dangling raw_ptr with id=0x%016" PRIxPTR
<< "The memory was freed at:\n" ":\n\n"
<< *stack_trace_free << "\n" "The memory was freed at:\n%s\n"
<< "The dangling raw_ptr was released at:\n" "The dangling raw_ptr was released at:\n%s",
<< stack_trace_release << task_trace_release; id, stack_trace_free->ToString().c_str(),
stack_trace_release.ToString().c_str());
} else { } else {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << StringPrintf(
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" "Detected dangling raw_ptr with id=0x%016" PRIxPTR
<< "It was not recorded where the memory was freed.\n\n" ":\n\n"
<< "The dangling raw_ptr was released at:\n" "It was not recorded where the memory was freed.\n\n"
<< stack_trace_release << task_trace_release; "The dangling raw_ptr was released at:\n%s",
id, stack_trace_release.ToString().c_str());
} }
IMMEDIATE_CRASH(); IMMEDIATE_CRASH();
} }
@ -546,40 +543,5 @@ void InstallDanglingRawPtrChecks() {
void InstallDanglingRawPtrChecks() {} void InstallDanglingRawPtrChecks() {}
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) #endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
void UnretainedDanglingRawPtrDetectedDumpWithoutCrashing(uintptr_t id) {
PA_NO_CODE_FOLDING();
debug::DumpWithoutCrashing();
}
void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
debug::TaskTrace task_trace;
debug::StackTrace stack_trace;
LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< task_trace << stack_trace;
IMMEDIATE_CRASH();
}
void InstallUnretainedDanglingRawPtrChecks() {
if (!FeatureList::IsEnabled(features::kPartitionAllocUnretainedDanglingPtr)) {
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn([](uintptr_t) {});
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/false);
return;
}
partition_alloc::SetUnretainedDanglingRawPtrCheckEnabled(/*enabled=*/true);
switch (features::kUnretainedDanglingPtrModeParam.Get()) {
case features::UnretainedDanglingPtrMode::kCrash:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedCrash);
break;
case features::UnretainedDanglingPtrMode::kDumpWithoutCrashing:
partition_alloc::SetUnretainedDanglingRawPtrDetectedFn(
&UnretainedDanglingRawPtrDetectedDumpWithoutCrashing);
break;
}
}
} // namespace allocator } // namespace allocator
} // namespace base } // namespace base
View File
@ -35,7 +35,6 @@ BASE_EXPORT std::map<std::string, std::string> ProposeSyntheticFinchTrials();
// This is currently effective only when compiled with the // This is currently effective only when compiled with the
// `enable_dangling_raw_ptr_checks` build flag. // `enable_dangling_raw_ptr_checks` build flag.
BASE_EXPORT void InstallDanglingRawPtrChecks(); BASE_EXPORT void InstallDanglingRawPtrChecks();
BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
} // namespace allocator } // namespace allocator
} // namespace base } // namespace base
View File
@ -1,7 +0,0 @@
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
View File
@ -115,7 +115,6 @@ component("partition_alloc") {
"partition_alloc_base/scoped_clear_last_error.h", "partition_alloc_base/scoped_clear_last_error.h",
"partition_alloc_base/strings/stringprintf.cc", "partition_alloc_base/strings/stringprintf.cc",
"partition_alloc_base/strings/stringprintf.h", "partition_alloc_base/strings/stringprintf.h",
"partition_alloc_base/system/sys_info.h",
"partition_alloc_base/thread_annotations.h", "partition_alloc_base/thread_annotations.h",
"partition_alloc_base/threading/platform_thread.cc", "partition_alloc_base/threading/platform_thread.cc",
"partition_alloc_base/threading/platform_thread.h", "partition_alloc_base/threading/platform_thread.h",
@ -259,23 +258,11 @@ component("partition_alloc") {
sources += [ sources += [
"partition_alloc_base/mac/foundation_util.h", "partition_alloc_base/mac/foundation_util.h",
"partition_alloc_base/mac/foundation_util.mm", "partition_alloc_base/mac/foundation_util.mm",
"partition_alloc_base/mac/mac_util.h",
"partition_alloc_base/mac/mac_util.mm",
"partition_alloc_base/mac/scoped_cftyperef.h", "partition_alloc_base/mac/scoped_cftyperef.h",
"partition_alloc_base/mac/scoped_typeref.h", "partition_alloc_base/mac/scoped_typeref.h",
] ]
if (is_ios) {
sources += [
"partition_alloc_base/ios/ios_util.h",
"partition_alloc_base/ios/ios_util.mm",
"partition_alloc_base/system/sys_info_ios.mm",
]
}
if (is_mac) {
sources += [
"partition_alloc_base/mac/mac_util.h",
"partition_alloc_base/mac/mac_util.mm",
"partition_alloc_base/system/sys_info_mac.mm",
]
}
} }
if (build_with_chromium) { if (build_with_chromium) {
if (current_cpu == "x64") { if (current_cpu == "x64") {
@ -331,11 +318,8 @@ component("partition_alloc") {
frameworks += [ "Security.framework" ] frameworks += [ "Security.framework" ]
} }
if (is_apple) { if (is_component_build && is_apple) {
frameworks += [ frameworks += [ "CoreFoundation.framework" ]
"CoreFoundation.framework",
"Foundation.framework",
]
} }
configs += [ "//build/config/compiler:wexit_time_destructors" ] configs += [ "//build/config/compiler:wexit_time_destructors" ]
@ -368,10 +352,6 @@ buildflag_header("partition_alloc_buildflags") {
_enable_gwp_asan_support = _enable_backup_ref_ptr_support _enable_gwp_asan_support = _enable_backup_ref_ptr_support
# Shadow metadata only supports Linux now.
_enable_shadow_metadata_for_64_bits_pointers =
enable_shadow_metadata && is_linux
# The only BRP mode that GWP-ASan supports is the "previous slot" mode. # The only BRP mode that GWP-ASan supports is the "previous slot" mode.
# This is because it requires out-of-line ref count storage for system # This is because it requires out-of-line ref count storage for system
# page aligned allocations. # page aligned allocations.
@ -397,15 +377,10 @@ buildflag_header("partition_alloc_buildflags") {
"RECORD_ALLOC_INFO=$_record_alloc_info", "RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap", "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$_enable_shadow_metadata_for_64_bits_pointers", "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
# *Scan is currently only used by Chromium. # *Scan is currently only used by Chromium.
"STARSCAN=$build_with_chromium", "STARSCAN=$build_with_chromium",
# We can only use `//base/tracing` when building in Chromium.
"PA_USE_BASE_TRACING=$build_with_chromium",
"ENABLE_PKEYS=$enable_pkeys",
] ]
} }
View File
@ -5,51 +5,6 @@
# PartitionAlloc is planned to be extracted into a standalone library, and # PartitionAlloc is planned to be extracted into a standalone library, and
# therefore dependencies need to be strictly controlled and minimized. # therefore dependencies need to be strictly controlled and minimized.
# Only these hosts are allowed for dependencies in this DEPS file.
# This is a subset of chromium/src/DEPS's allowed_hosts.
allowed_hosts = [
'chromium.googlesource.com',
]
vars = {
'chromium_git': 'https://chromium.googlesource.com',
}
deps = {
'partition_allocator/buildtools/clang_format/script':
Var('chromium_git') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git',
'partition_allocator/buildtools/linux64': {
'packages': [
{
'package': 'gn/gn/linux-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "linux"',
},
'partition_allocator/buildtools/mac': {
'packages': [
{
'package': 'gn/gn/mac-${{arch}}',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "mac"',
},
'partition_allocator/buildtools/win': {
'packages': [
{
'package': 'gn/gn/windows-amd64',
'version': 'latest',
}
],
'dep_type': 'cipd',
'condition': 'host_os == "win"',
},
}
noparent = True noparent = True
include_rules = [ include_rules = [
View File
@ -12,10 +12,6 @@ details. For implementation details, see the comments in
* [Build Config](./build_config.md): Pertinent GN args, buildflags, and * [Build Config](./build_config.md): Pertinent GN args, buildflags, and
macros. macros.
* [Chrome-External Builds](./external_builds.md): Further considerations
for standalone PartitionAlloc, plus an embedder's guide for some extra
GN args.
## Overview ## Overview
PartitionAlloc is a memory allocator optimized for space efficiency, PartitionAlloc is a memory allocator optimized for space efficiency,
View File
@ -47,14 +47,18 @@ void DecommitPages(uintptr_t address, size_t size) {
} // namespace } // namespace
void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) { pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
PA_DCHECK(!(ptr & kSuperPageOffsetMask)); PA_DCHECK(!(ptr & kSuperPageOffsetMask));
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask)); PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
PA_CHECK(handle > 0 && handle <= std::size(pools_));
Pool* pool = GetPool(handle); for (pool_handle i = 0; i < std::size(pools_); ++i) {
PA_CHECK(!pool->IsInitialized()); if (!pools_[i].IsInitialized()) {
pool->Initialize(ptr, length); pools_[i].Initialize(ptr, length);
return i + 1;
}
}
PA_NOTREACHED();
return 0;
} }
void AddressPoolManager::GetPoolUsedSuperPages( void AddressPoolManager::GetPoolUsedSuperPages(
@ -285,12 +289,12 @@ void AddressPoolManager::GetPoolStats(const pool_handle handle,
bool AddressPoolManager::GetStats(AddressSpaceStats* stats) { bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get 64-bit pool stats. // Get 64-bit pool stats.
GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats); GetPoolStats(GetRegularPool(), &stats->regular_pool_stats);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats); GetPoolStats(GetBRPPool(), &stats->brp_pool_stats);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (IsConfigurablePoolAvailable()) { if (IsConfigurablePoolAvailable()) {
GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats); GetPoolStats(GetConfigurablePool(), &stats->configurable_pool_stats);
} }
return true; return true;
} }
View File
@ -54,7 +54,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
AddressPoolManager& operator=(const AddressPoolManager&) = delete; AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length); pool_handle Add(uintptr_t address, size_t length);
void Remove(pool_handle handle); void Remove(pool_handle handle);
// Populate a |used| bitset of superpages currently in use. // Populate a |used| bitset of superpages currently in use.
@ -65,12 +65,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
uintptr_t GetPoolBaseAddress(pool_handle handle); uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif #endif
// Reserves address space from the pool. // Reserves address space from GigaCage.
uintptr_t Reserve(pool_handle handle, uintptr_t Reserve(pool_handle handle,
uintptr_t requested_address, uintptr_t requested_address,
size_t length); size_t length);
// Frees address space back to the pool and decommits underlying system pages. // Frees address space back to GigaCage and decommits underlying system pages.
void UnreserveAndDecommit(pool_handle handle, void UnreserveAndDecommit(pool_handle handle,
uintptr_t address, uintptr_t address,
size_t length); size_t length);
@ -158,9 +158,22 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
#endif // defined(PA_HAS_64_BITS_POINTERS) #endif // defined(PA_HAS_64_BITS_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_; static AddressPoolManager singleton_;
}; };
PA_ALWAYS_INLINE pool_handle GetRegularPool() {
return kRegularPoolHandle;
}
PA_ALWAYS_INLINE pool_handle GetBRPPool() {
return kBRPPoolHandle;
}
PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
PA_DCHECK(IsConfigurablePoolAvailable());
return kConfigurablePoolHandle;
}
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
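The Add() signature change above moves pool-slot selection between the manager and the caller. A rough sketch of the two call-site shapes, assuming base/length describe an existing super-page-aligned reservation; the GetInstance() accessor is an assumption based on the class being a singleton (singleton_ above):

// Explicit-handle form: the caller names the pool and Add() CHECKs
// that the chosen slot is uninitialized.
AddressPoolManager::GetInstance().Add(kRegularPoolHandle, base, length);

// Discovery form: the manager scans pools_[] for the first free slot
// and returns its 1-based handle.
pool_handle regular = AddressPoolManager::GetInstance().Add(base, length);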
View File
@ -111,17 +111,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap {
static bool IsAllowedSuperPageForBRPPool(uintptr_t address) { static bool IsAllowedSuperPageForBRPPool(uintptr_t address) {
// The only potentially dangerous scenario, in which this check is used, is // The only potentially dangerous scenario, in which this check is used, is
// when the assignment of the first raw_ptr<T> object for an address // when the assignment of the first raw_ptr<T> object for a non-GigaCage
// allocated outside the BRP pool is racing with the allocation of a new // address is racing with the allocation of a new GigaCage super-page at the
// super page at the same address. We assume that if raw_ptr<T> is being // same address. We assume that if raw_ptr<T> is being initialized with a
// initialized with a raw pointer, the associated allocation is "alive"; // raw pointer, the associated allocation is "alive"; otherwise, the issue
// otherwise, the issue should be fixed by rewriting the raw pointer // should be fixed by rewriting the raw pointer variable as raw_ptr<T>.
// variable as raw_ptr<T>. In the worst case, when such a fix is // In the worst case, when such a fix is impossible, we should just undo the
// impossible, we should just undo the raw pointer -> raw_ptr<T> rewrite of // raw pointer -> raw_ptr<T> rewrite of the problematic field. If the
// the problematic field. If the above assumption holds, the existing // above assumption holds, the existing allocation will prevent us from
// allocation will prevent us from reserving the super-page region and, // reserving the super-page region and, thus, having the race condition.
// thus, having the race condition. Since we rely on that external // Since we rely on that external synchronization, the relaxed memory
// synchronization, the relaxed memory ordering should be sufficient. // ordering should be sufficient.
return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load( return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load(
std::memory_order_relaxed); std::memory_order_relaxed);
} }
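Both wordings of the comment above rest on the same idea: a live allocation keeps its super page from being re-reserved, so the bitmap load needs no ordering of its own. A stripped-down sketch of the pattern, with invented sizes (the real map lives in AddressPoolManagerBitmap):

#include <atomic>
#include <cstdint>

constexpr uintptr_t kSuperPageShift = 21;  // 2 MiB super pages, as in PA
std::atomic<bool> forbidden_map[1 << 16];  // toy bitmap, illustration only

bool IsAllowedSuperPage(uintptr_t address) {
  // Relaxed suffices: whatever published the raw pointer already ordered
  // the allocation before this check.
  return !forbidden_map[(address >> kSuperPageShift) & 0xFFFF].load(
      std::memory_order_relaxed);
}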
View File
@ -17,7 +17,7 @@ namespace partition_alloc {
struct PoolStats { struct PoolStats {
size_t usage = 0; size_t usage = 0;
// On 32-bit, pools are mainly logical entities, intermingled with // On 32-bit, GigaCage is mainly a logical entity, intermingled with
// allocations not managed by PartitionAlloc. The "largest available // allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case. // reservation" is not possible to measure in that case.
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
View File
@ -12,9 +12,6 @@ namespace partition_alloc {
namespace { namespace {
DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {}; DanglingRawPtrDetectedFn* g_dangling_raw_ptr_detected_fn = [](uintptr_t) {};
DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {}; DanglingRawPtrReleasedFn* g_dangling_raw_ptr_released_fn = [](uintptr_t) {};
DanglingRawPtrDetectedFn* g_unretained_dangling_raw_ptr_detected_fn =
[](uintptr_t) {};
bool g_unretained_dangling_raw_ptr_check_enabled = false;
} // namespace } // namespace
DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() { DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn() {
@ -37,21 +34,6 @@ void SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedFn fn) {
g_dangling_raw_ptr_released_fn = fn; g_dangling_raw_ptr_released_fn = fn;
} }
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn() {
return g_unretained_dangling_raw_ptr_detected_fn;
}
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn* fn) {
PA_DCHECK(fn);
g_unretained_dangling_raw_ptr_detected_fn = fn;
}
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled) {
bool old = g_unretained_dangling_raw_ptr_check_enabled;
g_unretained_dangling_raw_ptr_check_enabled = enabled;
return old;
}
namespace internal { namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) { PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id) {
@ -61,15 +43,5 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id) {
g_dangling_raw_ptr_released_fn(id); g_dangling_raw_ptr_released_fn(id);
} }
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id) {
g_unretained_dangling_raw_ptr_detected_fn(id);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled() {
return g_unretained_dangling_raw_ptr_check_enabled;
}
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc
View File
@ -35,13 +35,6 @@ DanglingRawPtrDetectedFn* GetDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn); void SetDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
DanglingRawPtrDetectedFn* GetUnretainedDanglingRawPtrDetectedFn();
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void SetUnretainedDanglingRawPtrDetectedFn(DanglingRawPtrDetectedFn*);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool SetUnretainedDanglingRawPtrCheckEnabled(bool enabled);
// DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the // DanglingRawPtrReleased: Called after DanglingRawPtrDetected(id), once the
// last dangling raw_ptr stops referencing the memory region. // last dangling raw_ptr stops referencing the memory region.
// //
@ -56,10 +49,6 @@ namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id); PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id); PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DanglingRawPtrReleased(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void UnretainedDanglingRawPtrDetected(uintptr_t id);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool IsUnretainedDanglingRawPtrCheckEnabled();
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc
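The .cc/.h pair above is a plain function-pointer registry; one revision also carries a parallel set of hooks for unretained dangling pointers. A minimal sketch of installing a handler through the setters shown in the hunks (the handler body is invented):

#include <cstdint>
#include <cstdio>

void OnDanglingRawPtr(uintptr_t id) {
  std::fprintf(stderr, "dangling raw_ptr, id=0x%zx\n",
               static_cast<size_t>(id));
}

void InstallHandlers() {
  partition_alloc::SetDanglingRawPtrDetectedFn(&OnDanglingRawPtr);
  partition_alloc::SetDanglingRawPtrReleasedFn(&OnDanglingRawPtr);
}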
View File
@ -5,7 +5,6 @@
#include "base/allocator/partition_allocator/extended_api.h" #include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h" #include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#include "base/allocator/partition_allocator/thread_cache.h" #include "base/allocator/partition_allocator/thread_cache.h"
View File
@ -25,18 +25,6 @@ PartitionAlloc's build will expect them at
In addition, something must provide `build_with_chromium = false` to In addition, something must provide `build_with_chromium = false` to
the PA build system. the PA build system.
## Periodic Memory Reduction Routines
PartitionAlloc provides APIs to
* reclaim memory (see [memory\_reclaimer.h](./memory_reclaimer.h)) and
* purge thread caches (see [thread\_cache.h](./thread_cache.h)).
Both of these must be called by the embedder external to PartitionAlloc.
PA provides neither an event loop nor timers of its own, delegating this
to its clients.
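A sketch of the embedder glue this section calls for; the method names ReclaimPeriodically() and PurgeAll() are assumptions about the MemoryReclaimer and ThreadCacheRegistry APIs of this era, and the timer itself is left to the host application:

// Hypothetical callback, driven by the embedder's own timer or event loop.
void OnMemoryReclaimTimer() {
  ::partition_alloc::MemoryReclaimer::Instance()->ReclaimPeriodically();
  ::partition_alloc::ThreadCacheRegistry::Instance().PurgeAll();
}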
## Build Considerations ## Build Considerations
External clients create constraints on PartitionAlloc's implementation. External clients create constraints on PartitionAlloc's implementation.
View File
@ -89,24 +89,18 @@ Buckets consist of slot spans, organized as linked lists (see below).
holds some not-too-large memory chunks, ready to be allocated. This holds some not-too-large memory chunks, ready to be allocated. This
speeds up in-thread allocation by reducing a lock hold to a speeds up in-thread allocation by reducing a lock hold to a
thread-local storage lookup, improving cache locality. thread-local storage lookup, improving cache locality.
* **Pool**: A large (and contiguous on 64-bit) virtual address region, housing * **GigaCage**: A memory region several gigabytes wide, reserved by
super pages, etc. from which PartitionAlloc services allocations. The PartitionAlloc upon initialization, from which all allocations are
primary purpose of the pools is to provide a fast answer to the taken. The motivation for GigaCage is for code to be able to examine
question, "Did PartitionAlloc allocate the memory for this pointer a pointer and to immediately determine whether or not the memory was
from this pool?" with a single bit-masking operation. allocated by PartitionAlloc. This provides support for a number of
* The regular pool is a general purpose pool that contains allocations that features, including
aren't protected by BackupRefPtr. [StarScan][starscan-readme] and
* The BRP pool contains all allocations protected by BackupRefPtr. [BackupRefPtr][brp-doc].
* [64-bit only] The configurable pool is named generically, because its * Note that GigaCage only exists in builds with 64-bit pointers.
primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime, * In builds with 32-bit pointers, PartitionAlloc tracks pointers
providing a pre-existing mapping. Its allocations aren't protected by it dispenses with a bitmap. This is often referred to as "fake
BackupRefPtr. GigaCage" (or simply "GigaCage") for lack of a better term.
*** promo
Pools are downgraded into a logical concept in 32-bit environments,
tracking a non-contiguous set of allocations using a bitmap.
***
* **Payload**: The usable area of a super page in which slot spans * **Payload**: The usable area of a super page in which slot spans
reside. While generally this means "everything between the first reside. While generally this means "everything between the first
and last guard partition pages in a super page," the presence of and last guard partition pages in a super page," the presence of
@ -130,19 +124,6 @@ By "slow" we may mean something as simple as extra logic (`if`
statements etc.), or something as costly as system calls. statements etc.), or something as costly as system calls.
*** ***
## Legacy Terms
These terms are (mostly) deprecated and should not be used. They are
surfaced here to provide a ready reference for readers coming from
older design documents or documentation.
* **GigaCage**: A memory region several gigabytes wide, reserved by
PartitionAlloc upon initialization, from which nearly all allocations
are taken. _Pools_ have overtaken GigaCage in conceptual importance,
and so there is less need today to refer to "GigaCage" or the
"cage." This is especially true given the V8 Sandbox and the
configurable pool (see above).
## PartitionAlloc-Everywhere ## PartitionAlloc-Everywhere
Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine). Originally, PartitionAlloc was used only in Blink (Chromium's rendering engine).
@ -172,4 +153,5 @@ As of 2022, PartitionAlloc-Everywhere is supported on
[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37 [max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h [pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview# [starscan-readme]: https://chromium.googlesource.com/chromium/src/+/main/base/allocator/partition_allocator/starscan/README.md
[brp-doc]: https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/preview
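The "single bit-masking operation" in the rewritten glossary entry reduces to a mask-and-compare; a sketch with invented constants (the real pool bases and sizes are kept by PartitionAddressSpace):

#include <cstdint>

// Invented: one 16 GiB pool reserved at a 16 GiB-aligned base on 64-bit.
constexpr uintptr_t kPoolSize = uintptr_t{16} << 30;
constexpr uintptr_t kPoolBaseMask = ~(kPoolSize - 1);
uintptr_t g_regular_pool_base = 0;  // recorded when the pool is reserved

bool IsManagedByRegularPool(uintptr_t address) {
  return (address & kPoolBaseMask) == g_regular_pool_base;
}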
View File
@ -19,8 +19,10 @@ namespace partition_alloc {
// Posts and handles memory reclaim tasks for PartitionAlloc. // Posts and handles memory reclaim tasks for PartitionAlloc.
// //
// PartitionAlloc users are responsible for scheduling and calling the // Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
// reclamation methods with their own timers / event loops. // called from any thread, concurrently with reclaim. Reclaim itself runs in the
// context of the provided |SequencedTaskRunner|, meaning that the caller must
// take care of this runner being compatible with the various partitions.
// //
// Singleton as this runs as long as the process is alive, and // Singleton as this runs as long as the process is alive, and
// having multiple instances would be wasteful. // having multiple instances would be wasteful.
View File
@ -50,19 +50,17 @@ uintptr_t AllocPagesIncludingReserved(
uintptr_t address, uintptr_t address,
size_t length, size_t length,
PageAccessibilityConfiguration accessibility, PageAccessibilityConfiguration accessibility,
PageTag page_tag, PageTag page_tag) {
int file_descriptor_for_shared_alloc = -1) {
uintptr_t ret = uintptr_t ret =
internal::SystemAllocPages(address, length, accessibility, page_tag, internal::SystemAllocPages(address, length, accessibility, page_tag);
file_descriptor_for_shared_alloc);
if (!ret) { if (!ret) {
const bool cant_alloc_length = internal::kHintIsAdvisory || !address; const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
if (cant_alloc_length) { if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address // The system cannot allocate |length| bytes. Release any reserved address
// space and try once more. // space and try once more.
ReleaseReservation(); ReleaseReservation();
ret = internal::SystemAllocPages(address, length, accessibility, page_tag, ret =
file_descriptor_for_shared_alloc); internal::SystemAllocPages(address, length, accessibility, page_tag);
} }
} }
return ret; return ret;
@ -129,12 +127,11 @@ namespace internal {
uintptr_t SystemAllocPages(uintptr_t hint, uintptr_t SystemAllocPages(uintptr_t hint,
size_t length, size_t length,
PageAccessibilityConfiguration accessibility, PageAccessibilityConfiguration accessibility,
PageTag page_tag, PageTag page_tag) {
int file_descriptor_for_shared_alloc) {
PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret = internal::SystemAllocPagesInternal( uintptr_t ret =
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc); internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
if (ret) if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed); g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
@ -146,10 +143,9 @@ uintptr_t SystemAllocPages(uintptr_t hint,
uintptr_t AllocPages(size_t length, uintptr_t AllocPages(size_t length,
size_t align, size_t align,
PageAccessibilityConfiguration accessibility, PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag,
-                     int file_descriptor_for_shared_alloc) {
-  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
-                                   page_tag, file_descriptor_for_shared_alloc);
+                     PageTag page_tag) {
+  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
+                                   page_tag);
 }

 uintptr_t AllocPages(uintptr_t address,
                      size_t length,
@@ -175,8 +171,7 @@ uintptr_t AllocPagesWithAlignOffset(
     size_t align,
     size_t align_offset,
     PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    int file_descriptor_for_shared_alloc) {
+    PageTag page_tag) {
   PA_DCHECK(length >= internal::PageAllocationGranularity());
   PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
   PA_DCHECK(align >= internal::PageAllocationGranularity());
@@ -206,8 +201,7 @@ uintptr_t AllocPagesWithAlignOffset(
   for (int i = 0; i < kExactSizeTries; ++i) {
     uintptr_t ret =
-        AllocPagesIncludingReserved(address, length, accessibility, page_tag,
-                                    file_descriptor_for_shared_alloc);
+        AllocPagesIncludingReserved(address, length, accessibility, page_tag);
     if (ret) {
       // If the alignment is to our liking, we're done.
       if ((ret & align_offset_mask) == align_offset)
@@ -240,9 +234,8 @@ uintptr_t AllocPagesWithAlignOffset(
   do {
     // Continue randomizing only on POSIX.
     address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
-    ret =
-        AllocPagesIncludingReserved(address, try_length, accessibility,
-                                    page_tag, file_descriptor_for_shared_alloc);
+    ret = AllocPagesIncludingReserved(address, try_length, accessibility,
+                                      page_tag);
     // The retries are for Windows, where a race can steal our mapping on
     // resize.
   } while (ret && (ret = TrimMapping(ret, try_length, length, align,
@@ -316,8 +309,7 @@ void RecommitSystemPages(
     PageAccessibilityDisposition accessibility_disposition) {
   PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
   PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  PA_DCHECK(accessibility.permissions !=
-            PageAccessibilityConfiguration::kInaccessible);
+  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
   internal::RecommitSystemPagesInternal(address, length, accessibility,
                                         accessibility_disposition);
 }
@@ -331,8 +323,7 @@ bool TryRecommitSystemPages(
   // crashing case.
   PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
   PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
-  PA_DCHECK(accessibility.permissions !=
-            PageAccessibilityConfiguration::kInaccessible);
+  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
   return internal::TryRecommitSystemPagesInternal(
       address, length, accessibility, accessibility_disposition);
 }

View File

@@ -11,13 +11,11 @@
 #include "base/allocator/partition_allocator/page_allocator_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
 #include "build/build_config.h"

 namespace partition_alloc {

-struct PageAccessibilityConfiguration {
-  enum Permissions {
+enum class PageAccessibilityConfiguration {
   kInaccessible,
   kRead,
   kReadWrite,
@@ -31,23 +29,6 @@ struct PageAccessibilityConfiguration {
   // This flag is deprecated and will go away soon.
   // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
   kReadWriteExecute,
-  };
-
-#if BUILDFLAG(ENABLE_PKEYS)
-  constexpr PageAccessibilityConfiguration(Permissions permissions)
-      : permissions(permissions), pkey(0) {}
-  constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
-      : permissions(permissions), pkey(pkey) {}
-#else
-  constexpr PageAccessibilityConfiguration(Permissions permissions)
-      : permissions(permissions) {}
-#endif  // BUILDFLAG(ENABLE_PKEYS)
-
-  Permissions permissions;
-#if BUILDFLAG(ENABLE_PKEYS)
-  // Tag the page with a Memory Protection Key. Use 0 for none.
-  int pkey;
-#endif  // BUILDFLAG(ENABLE_PKEYS)
 };

 // Use for De/RecommitSystemPages API.
@@ -97,17 +78,12 @@ uintptr_t NextAlignedWithOffset(uintptr_t ptr,
 // |page_tag| is used on some platforms to identify the source of the
 // allocation. Use PageTag::kChromium as a catch-all category.
 //
-// |file_descriptor_for_shared_alloc| is only used in mapping the shadow
-// pools to the same physical address as the real one in
-// PartitionAddressSpace::Init(). It should be ignored in other cases.
-//
 // This call will return 0/nullptr if the allocation cannot be satisfied.
 PA_COMPONENT_EXPORT(PARTITION_ALLOC)
 uintptr_t AllocPages(size_t length,
                      size_t align,
                      PageAccessibilityConfiguration accessibility,
-                     PageTag page_tag,
-                     int file_descriptor_for_shared_alloc = -1);
+                     PageTag page_tag);
 PA_COMPONENT_EXPORT(PARTITION_ALLOC)
 uintptr_t AllocPages(uintptr_t address,
                      size_t length,
@@ -127,8 +103,7 @@ uintptr_t AllocPagesWithAlignOffset(
     size_t align,
     size_t align_offset,
     PageAccessibilityConfiguration page_accessibility,
-    PageTag page_tag,
-    int file_descriptor_for_shared_alloc = -1);
+    PageTag page_tag);

 // Frees one or more pages starting at |address| and continuing for |length|
 // bytes.
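
Below is a minimal stand-alone sketch, not taken from either revision, of the struct-wrapped permissions shape in the hunk above. It shows why comparison sites read `accessibility.permissions != kInaccessible` on one side of this diff and `accessibility != kInaccessible` on the other; the member names mirror the diff, while the toy `main()` is hypothetical.

#include <cassert>

struct PageAccessibilityConfiguration {
  enum Permissions { kInaccessible, kRead, kReadWrite };
  // Implicit one-argument constructor keeps `f(kReadWrite)`-style call sites
  // compiling after the plain enum becomes a struct.
  constexpr PageAccessibilityConfiguration(Permissions permissions)
      : permissions(permissions), pkey(0) {}
  constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
      : permissions(permissions), pkey(pkey) {}
  Permissions permissions;
  int pkey;  // Memory Protection Key tag; 0 means none.
};

int main() {
  // The bare enumerator still converts implicitly, but comparisons must now
  // go through the wrapped field rather than the value itself.
  PageAccessibilityConfiguration config =
      PageAccessibilityConfiguration::kReadWrite;
  assert(config.permissions != PageAccessibilityConfiguration::kInaccessible);
  assert(config.pkey == 0);
}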

View File

@@ -77,7 +77,7 @@ PageAllocationGranularityShift() {
   // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
   // here.
   return 16;  // 64kB
-#elif defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
+#elif defined(_MIPS_ARCH_LOONGSON)
   return 14;  // 16kB
 #elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
   return static_cast<size_t>(vm_page_shift);

View File

@@ -15,8 +15,7 @@ namespace partition_alloc::internal {
 uintptr_t SystemAllocPages(uintptr_t hint,
                            size_t length,
                            PageAccessibilityConfiguration accessibility,
-                           PageTag page_tag,
-                           int file_descriptor_for_shared_alloc = -1);
+                           PageTag page_tag);

 }  // namespace partition_alloc::internal

View File

@@ -45,7 +45,7 @@ const char* PageTagToName(PageTag tag) {

 zx_vm_option_t PageAccessibilityToZxVmOptions(
     PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
+  switch (accessibility) {
     case PageAccessibilityConfiguration::kRead:
       return ZX_VM_PERM_READ;
     case PageAccessibilityConfiguration::kReadWrite:
@@ -72,12 +72,10 @@ constexpr bool kHintIsAdvisory = false;

 std::atomic<int32_t> s_allocPageErrorCode{0};

-uintptr_t SystemAllocPagesInternal(
-    uintptr_t hint,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag) {
   zx::vmo vmo;
   zx_status_t status = zx::vmo::create(length, 0, &vmo);
   if (status != ZX_OK) {

View File

@@ -16,7 +16,7 @@

 namespace partition_alloc::internal {

 int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
+  switch (accessibility) {
     case PageAccessibilityConfiguration::kRead:
       return PROT_READ;
     case PageAccessibilityConfiguration::kReadWriteTagged:

View File

@@ -21,13 +21,7 @@

 #if BUILDFLAG(IS_APPLE)
 #include "base/allocator/partition_allocator/partition_alloc_base/mac/foundation_util.h"
-#if BUILDFLAG(IS_IOS)
-#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
-#elif BUILDFLAG(IS_MAC)
 #include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h"
-#else
-#error "Unknown platform"
-#endif
 #include "base/allocator/partition_allocator/partition_alloc_base/mac/scoped_cftyperef.h"

 #include <Availability.h>
@@ -148,18 +142,15 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility);

 uintptr_t SystemAllocPagesInternal(uintptr_t hint,
                                    size_t length,
                                    PageAccessibilityConfiguration accessibility,
-                                   PageTag page_tag,
-                                   int file_descriptor_for_shared_alloc) {
+                                   PageTag page_tag) {
 #if BUILDFLAG(IS_APPLE)
   // Use a custom tag to make it easier to distinguish Partition Alloc regions
   // in vmmap(1). Tags between 240-255 are supported.
   PA_DCHECK(PageTag::kFirst <= page_tag);
   PA_DCHECK(PageTag::kLast >= page_tag);
-  int fd = file_descriptor_for_shared_alloc == -1
-               ? VM_MAKE_TAG(static_cast<int>(page_tag))
-               : file_descriptor_for_shared_alloc;
+  int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
 #else
-  int fd = file_descriptor_for_shared_alloc;
+  int fd = -1;
 #endif

   int access_flag = GetAccessFlags(accessibility);
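
A small macOS-only sketch (an editor's illustration, not code from this diff) of the idiom used by SystemAllocPagesInternal above: for anonymous mappings, the fd argument of mmap() doubles as a vmmap(1) tag via VM_MAKE_TAG. The tag value 240 here is an arbitrary pick from the 240-255 application range mentioned in the comment.

#include <mach/vm_statistics.h>
#include <sys/mman.h>
#include <cstdio>

int main() {
  // No backing file: the "fd" encodes a VM tag for anonymous memory.
  int fd = VM_MAKE_TAG(240);
  void* p = mmap(nullptr, 4096, PROT_NONE, MAP_ANON | MAP_PRIVATE, fd, 0);
  if (p == MAP_FAILED) {
    std::perror("mmap");
    return 1;
  }
  // The region is now attributed to tag 240 in `vmmap <pid>` output.
  munmap(p, 4096);
}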

View File

@@ -32,7 +32,7 @@ constexpr bool kHintIsAdvisory = false;
 std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};

 int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
-  switch (accessibility.permissions) {
+  switch (accessibility) {
     case PageAccessibilityConfiguration::kRead:
       return PAGE_READONLY;
     case PageAccessibilityConfiguration::kReadWrite:
@@ -51,15 +51,13 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
   }
 }

-uintptr_t SystemAllocPagesInternal(
-    uintptr_t hint,
-    size_t length,
-    PageAccessibilityConfiguration accessibility,
-    PageTag page_tag,
-    [[maybe_unused]] int file_descriptor_for_shared_alloc) {
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag) {
   DWORD access_flag = GetAccessFlags(accessibility);
-  const DWORD type_flags = (accessibility.permissions !=
-                            PageAccessibilityConfiguration::kInaccessible)
-                               ? (MEM_RESERVE | MEM_COMMIT)
-                               : MEM_RESERVE;
+  const DWORD type_flags =
+      (accessibility != PageAccessibilityConfiguration::kInaccessible)
+          ? (MEM_RESERVE | MEM_COMMIT)
+          : MEM_RESERVE;
   void* ret = VirtualAlloc(reinterpret_cast<void*>(hint), length, type_flags,
@@ -92,8 +90,7 @@ bool TrySetSystemPagesAccessInternal(
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   void* ptr = reinterpret_cast<void*>(address);
-  if (accessibility.permissions ==
-      PageAccessibilityConfiguration::kInaccessible)
+  if (accessibility == PageAccessibilityConfiguration::kInaccessible)
     return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
   return nullptr !=
          VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility));
@@ -104,8 +101,7 @@ void SetSystemPagesAccessInternal(
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   void* ptr = reinterpret_cast<void*>(address);
-  if (accessibility.permissions ==
-      PageAccessibilityConfiguration::kInaccessible) {
+  if (accessibility == PageAccessibilityConfiguration::kInaccessible) {
     if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
       // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
       // report we get the error number.
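
A stand-alone Windows sketch (illustrative, not from the diff) of the reserve/commit split that type_flags selects above: inaccessible regions are only MEM_RESERVE'd, while accessible ones are reserved and committed in a single VirtualAlloc call.

#include <windows.h>
#include <cstdio>

int main() {
  // Reserve address space only: no backing memory, PAGE_NOACCESS.
  void* reserved = VirtualAlloc(nullptr, 1 << 20, MEM_RESERVE, PAGE_NOACCESS);
  // Reserve and commit in one step, read/write.
  void* committed =
      VirtualAlloc(nullptr, 1 << 20, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!reserved || !committed) {
    std::printf("VirtualAlloc failed: %lu\n", GetLastError());
    return 1;
  }
  static_cast<char*>(committed)[0] = 1;  // committed memory is usable
  VirtualFree(committed, 0, MEM_RELEASE);
  VirtualFree(reserved, 0, MEM_RELEASE);
}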

View File

@@ -5,7 +5,6 @@
 #include "base/allocator/partition_allocator/partition_address_space.h"

 #include <array>
-#include <cstddef>
 #include <cstdint>
 #include <ostream>
 #include <string>
@@ -28,10 +27,6 @@
 #include <windows.h>
 #endif  // BUILDFLAG(IS_WIN)

-#if defined(PA_ENABLE_SHADOW_METADATA)
-#include <sys/mman.h>
-#endif
-
 namespace partition_alloc::internal {

 #if defined(PA_HAS_64_BITS_POINTERS)
@@ -40,7 +35,7 @@ namespace {

 #if BUILDFLAG(IS_WIN)

-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
 bool IsLegacyWindowsVersion() {
   // Use ::RtlGetVersion instead of ::GetVersionEx or helpers from
   // VersionHelpers.h because those alternatives change their behavior depending
@@ -64,20 +59,20 @@ bool IsLegacyWindowsVersion() {
   return version_info.dwMajorVersion < 6 ||
          (version_info.dwMajorVersion == 6 && version_info.dwMinorVersion < 3);
 }
-#endif  // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

-PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
+PA_NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
   PA_NO_CODE_FOLDING();
   PA_CHECK(false);
 }

-PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
+PA_NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
   PA_NO_CODE_FOLDING();
   PA_CHECK(false);
 }
 #endif  // BUILDFLAG(IS_WIN)

-PA_NOINLINE void HandlePoolAllocFailure() {
+PA_NOINLINE void HandleGigaCageAllocFailure() {
   PA_NO_CODE_FOLDING();
   uint32_t alloc_page_error_code = GetAllocPageErrorCode();
   PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
@@ -87,12 +82,12 @@ PA_NOINLINE void HandlePoolAllocFailure() {
   if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
     // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
     // it must be VA space exhaustion.
-    HandlePoolAllocFailureOutOfVASpace();
+    HandleGigaCageAllocFailureOutOfVASpace();
   } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
     // On Windows <8.1, MEM_RESERVE increases commit charge to account for
     // not-yet-committed PTEs needed to cover that VA space, if it was to be
     // committed (see crbug.com/1101421#c16).
-    HandlePoolAllocFailureOutOfCommitCharge();
+    HandleGigaCageAllocFailureOutOfCommitCharge();
   } else
 #endif  // BUILDFLAG(IS_WIN)
   {
@@ -103,14 +98,9 @@ PA_NOINLINE void HandlePoolAllocFailure() {
 }  // namespace

 alignas(kPartitionCachelineSize)
-    PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
+    PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_;

-#if defined(PA_ENABLE_SHADOW_METADATA)
-std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
-std::ptrdiff_t PartitionAddressSpace::brp_pool_shadow_offset_ = 0;
-#endif
-
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
 #if BUILDFLAG(IS_IOS)
 namespace {
 bool IsIOSTestProcess() {
@@ -161,30 +151,25 @@ PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
   return IsLegacyWindowsVersion() ? kBRPPoolSizeForLegacyWindows : kBRPPoolSize;
 }
 #endif  // BUILDFLAG(IS_IOS)
-#endif  // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

 void PartitionAddressSpace::Init() {
   if (IsInitialized())
     return;

   size_t regular_pool_size = RegularPoolSize();
-#if defined(PA_ENABLE_SHADOW_METADATA)
-  int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
-#else
-  int regular_pool_fd = -1;
-#endif
-  setup_.regular_pool_base_address_ =
-      AllocPages(regular_pool_size, regular_pool_size,
-                 PageAccessibilityConfiguration::kInaccessible,
-                 PageTag::kPartitionAlloc, regular_pool_fd);
+  setup_.regular_pool_base_address_ = AllocPages(
+      regular_pool_size, regular_pool_size,
+      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
   if (!setup_.regular_pool_base_address_)
-    HandlePoolAllocFailure();
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+    HandleGigaCageAllocFailure();
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
 #endif
   PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
-  AddressPoolManager::GetInstance().Add(
-      kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
+  setup_.regular_pool_ = AddressPoolManager::GetInstance().Add(
+      setup_.regular_pool_base_address_, regular_pool_size);
+  PA_CHECK(setup_.regular_pool_ == kRegularPoolHandle);
   PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
   PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
   PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
@@ -193,11 +178,6 @@ void PartitionAddressSpace::Init() {
       !IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));

   size_t brp_pool_size = BRPPoolSize();
-#if defined(PA_ENABLE_SHADOW_METADATA)
-  int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
-#else
-  int brp_pool_fd = -1;
-#endif
   // Reserve an extra allocation granularity unit before the BRP pool, but keep
   // the pool aligned at BRPPoolSize(). A pointer immediately past an allocation
   // is a valid pointer, and having a "forbidden zone" before the BRP pool
@@ -206,17 +186,17 @@ void PartitionAddressSpace::Init() {
   uintptr_t base_address = AllocPagesWithAlignOffset(
       0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
       brp_pool_size - kForbiddenZoneSize,
-      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
-      brp_pool_fd);
+      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc);
   if (!base_address)
-    HandlePoolAllocFailure();
+    HandleGigaCageAllocFailure();
   setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
 #endif
   PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
-  AddressPoolManager::GetInstance().Add(
-      kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
+  setup_.brp_pool_ = AddressPoolManager::GetInstance().Add(
+      setup_.brp_pool_base_address_, brp_pool_size);
+  PA_CHECK(setup_.brp_pool_ == kBRPPoolHandle);
   PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
   PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
   PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
@@ -226,29 +206,11 @@ void PartitionAddressSpace::Init() {
   // Reserve memory for PCScan quarantine card table.
   uintptr_t requested_address = setup_.regular_pool_base_address_;
   uintptr_t actual_address = AddressPoolManager::GetInstance().Reserve(
-      kRegularPoolHandle, requested_address, kSuperPageSize);
+      setup_.regular_pool_, requested_address, kSuperPageSize);
   PA_CHECK(requested_address == actual_address)
       << "QuarantineCardTable is required to be allocated at the beginning of "
         "the regular pool";
 #endif  // PA_STARSCAN_USE_CARD_TABLE
-
-#if defined(PA_ENABLE_SHADOW_METADATA)
-  // Reserve memory for the shadow pools.
-  uintptr_t regular_pool_shadow_address =
-      AllocPages(regular_pool_size, regular_pool_size,
-                 PageAccessibilityConfiguration::kInaccessible,
-                 PageTag::kPartitionAlloc, regular_pool_fd);
-  regular_pool_shadow_offset_ =
-      regular_pool_shadow_address - setup_.regular_pool_base_address_;
-
-  uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
-      0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
-      brp_pool_size - kForbiddenZoneSize,
-      PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
-      brp_pool_fd);
-  brp_pool_shadow_offset_ =
-      brp_pool_shadow_address - setup_.brp_pool_base_address_;
-#endif
 }

 void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
@@ -268,8 +230,9 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
   setup_.configurable_pool_base_address_ = pool_base;
   setup_.configurable_pool_base_mask_ = ~(size - 1);

-  AddressPoolManager::GetInstance().Add(
-      kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
+  setup_.configurable_pool_ = AddressPoolManager::GetInstance().Add(
+      setup_.configurable_pool_base_address_, size);
+  PA_CHECK(setup_.configurable_pool_ == kConfigurablePoolHandle);
 }

 void PartitionAddressSpace::UninitForTesting() {
@@ -285,13 +248,17 @@ void PartitionAddressSpace::UninitForTesting() {
   setup_.brp_pool_base_address_ = kUninitializedPoolBaseAddress;
   setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
   setup_.configurable_pool_base_mask_ = 0;
+  setup_.regular_pool_ = 0;
+  setup_.brp_pool_ = 0;
+  setup_.configurable_pool_ = 0;
   AddressPoolManager::GetInstance().ResetForTesting();
 }

 void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
-  AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
+  AddressPoolManager::GetInstance().Remove(setup_.configurable_pool_);
   setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
   setup_.configurable_pool_base_mask_ = 0;
+  setup_.configurable_pool_ = 0;
 }

 #if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
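
A stand-alone sketch (illustrative, toy sizes, not from the diff) of the alignment arithmetic behind the forbidden-zone reservation above: requesting brp_pool_size + kForbiddenZoneSize bytes with alignment brp_pool_size and align_offset brp_pool_size - kForbiddenZoneSize leaves the pool itself aligned at its own size, with the inaccessible zone just before it.

#include <cassert>
#include <cstdint>

int main() {
  constexpr uintptr_t kBrpPoolSize = 1 << 20;        // toy value, power of two
  constexpr uintptr_t kForbiddenZoneSize = 1 << 12;  // one granularity unit

  // Pretend the allocator honored the requested offset, i.e.
  // base % kBrpPoolSize == kBrpPoolSize - kForbiddenZoneSize.
  uintptr_t base = 7 * kBrpPoolSize - kForbiddenZoneSize;

  uintptr_t brp_pool_base = base + kForbiddenZoneSize;
  assert(brp_pool_base % kBrpPoolSize == 0);  // pool stays size-aligned
  // Everything in [base, brp_pool_base) is the inaccessible forbidden zone,
  // so a pointer just past a non-BRP allocation cannot land inside the pool.
}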

View File

@@ -7,7 +7,6 @@

 #include <algorithm>
 #include <array>
-#include <cstddef>
 #include <limits>

 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
@@ -31,11 +30,16 @@ namespace partition_alloc {

 namespace internal {

-// Manages PartitionAlloc address space, which is split into pools.
-// See `glossary.md`.
+// Reserves address space for PartitionAllocator.
 class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
  public:
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+  // BRP stands for BackupRefPtr. GigaCage is split into pools, one which
+  // supports BackupRefPtr and one that doesn't.
+  static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() {
+    return setup_.regular_pool_;
+  }
+
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
     return setup_.regular_pool_base_mask_;
   }
@@ -45,6 +49,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   }
 #endif

+  static PA_ALWAYS_INLINE internal::pool_handle GetBRPPool() {
+    return setup_.brp_pool_;
+  }
+
+  // The Configurable Pool can be created inside an existing mapping and so will
+  // be located outside PartitionAlloc's GigaCage.
+  static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
+    return setup_.configurable_pool_;
+  }
+
   static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
       uintptr_t address) {
     // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
@@ -54,16 +68,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
     pool_handle pool = 0;
     uintptr_t base = 0;
     if (IsInRegularPool(address)) {
-      pool = kRegularPoolHandle;
+      pool = GetRegularPool();
       base = setup_.regular_pool_base_address_;
 #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
     } else if (IsInBRPPool(address)) {
-      pool = kBRPPoolHandle;
+      pool = GetBRPPool();
       base = setup_.brp_pool_base_address_;
 #endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
     } else if (IsInConfigurablePool(address)) {
-      PA_DCHECK(IsConfigurablePoolInitialized());
-      pool = kConfigurablePoolHandle;
+      pool = GetConfigurablePool();
       base = setup_.configurable_pool_base_address_;
     } else {
       PA_NOTREACHED();
@@ -77,15 +90,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
     return kConfigurablePoolMinSize;
   }

-  // Initialize pools (except for the configurable one).
-  //
+  // Initialize the GigaCage and the Pools inside of it.
   // This function must only be called from the main thread.
   static void Init();
   // Initialize the ConfigurablePool at the given address |pool_base|. It must
   // be aligned to the size of the pool. The size must be a power of two and
-  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
-  //
-  // This function must only be called from the main thread.
+  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()]. This
+  // function must only be called from the main thread.
   static void InitConfigurablePool(uintptr_t pool_base, size_t size);
   static void UninitForTesting();
   static void UninitConfigurablePoolForTesting();
@@ -93,12 +104,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   static PA_ALWAYS_INLINE bool IsInitialized() {
     // Either neither or both regular and BRP pool are initialized. The
     // configurable pool is initialized separately.
-    if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
-      PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
+    if (setup_.regular_pool_) {
+      PA_DCHECK(setup_.brp_pool_ != 0);
       return true;
     }
-    PA_DCHECK(setup_.brp_pool_base_address_ == kUninitializedPoolBaseAddress);
+    PA_DCHECK(setup_.brp_pool_ == 0);
     return false;
   }
@@ -109,7 +120,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {

   // Returns false for nullptr.
   static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
     const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
 #else
     constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
@@ -124,19 +135,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {

   // Returns false for nullptr.
   static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
     const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
 #else
     constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
 #endif
     return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
   }
-
-  static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
-    PA_DCHECK(IsInBRPPool(address));
-    return address - setup_.brp_pool_base_address_;
-  }
-
   // Returns false for nullptr.
   static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
     return (address & setup_.configurable_pool_base_mask_) ==
@@ -147,21 +152,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
     return setup_.configurable_pool_base_address_;
   }

-#if defined(PA_ENABLE_SHADOW_METADATA)
-  static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
-    if (pool == kRegularPoolHandle) {
-      return regular_pool_shadow_offset_;
-    } else if (pool == kBRPPoolHandle) {
-      return brp_pool_shadow_offset_;
-    } else {
-      // TODO(crbug.com/1362969): Add shadow for configurable pool as well.
-      // Shadow is not created for ConfigurablePool for now, so this part should
-      // be unreachable.
-      PA_NOTREACHED();
-      return 0;
-    }
-  }
-#endif
+  static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
+    PA_DCHECK(IsInBRPPool(address));
+    return address - setup_.brp_pool_base_address_;
+  }

   // PartitionAddressSpace is static_only class.
   PartitionAddressSpace() = delete;
@@ -170,7 +164,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   void* operator new(size_t, void*) = delete;

  private:
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   static PA_ALWAYS_INLINE size_t RegularPoolSize();
   static PA_ALWAYS_INLINE size_t BRPPoolSize();
 #else
@@ -181,32 +175,32 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
     return kBRPPoolSize;
   }
-#endif  // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

-  // On 64-bit systems, PA allocates from several contiguous, mutually disjoint
-  // pools. The BRP pool is where all allocations have a BRP ref-count, thus
-  // pointers pointing there can use a BRP protection against UaF. Allocations
-  // in the other pools don't have that.
+  // On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool, is
+  // where all allocations have a BRP ref-count, thus pointers pointing there
+  // can use a BRP protection against UaF. Allocations in the other pools don't
+  // have that.
   //
   // Pool sizes have to be the power of two. Each pool will be aligned at its
   // own size boundary.
   //
-  // NOTE! The BRP pool must be preceded by an inaccessible region. This is to
-  // prevent a pointer to the end of a non-BRP-pool allocation from falling into
-  // the BRP pool, thus triggering BRP mechanism and likely crashing. This
-  // "forbidden zone" can be as small as 1B, but it's simpler to just reserve an
-  // allocation granularity unit.
+  // NOTE! The BRP pool must be preceded by a reserved region, where allocations
+  // are forbidden. This is to prevent a pointer immediately past a non-GigaCage
+  // allocation from falling into the BRP pool, thus triggering BRP mechanism
+  // and likely crashing. This "forbidden zone" can be as small as 1B, but it's
+  // simpler to just reserve an allocation granularity unit.
   //
-  // The ConfigurablePool is an optional Pool that can be created inside an
-  // existing mapping provided by the embedder. This Pool can be used when
-  // certain PA allocations must be located inside a given virtual address
-  // region. One use case for this Pool is V8 Sandbox, which requires that
-  // ArrayBuffers be located inside of it.
+  // The ConfigurablePool is an optional Pool that can be created inside an
+  // existing mapping by the embedder, and so will be outside of the GigaCage.
+  // This Pool can be used when certain PA allocations must be located inside a
+  // given virtual address region. One use case for this Pool is V8's virtual
+  // memory cage, which requires that ArrayBuffers be located inside of it.
   static constexpr size_t kRegularPoolSize = kPoolMaxSize;
   static constexpr size_t kBRPPoolSize = kPoolMaxSize;
   static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
                 base::bits::IsPowerOfTwo(kBRPPoolSize));
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   // We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
   // crbug.com/1101421 and crbug.com/1217759).
   static constexpr size_t kRegularPoolSizeForLegacyWindows = 4 * kGiB;
@@ -215,7 +209,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
   static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows) &&
                 base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
-#endif  // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
   static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
   static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
@@ -224,7 +218,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {

 #if BUILDFLAG(IS_IOS)

-#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
 #error iOS is only supported with a dynamically sized GigaCase.
 #endif
@@ -239,7 +233,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
                 base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
 #endif  // BUILDFLAG(IOS_IOS)

-#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   // Masks used to easy determine belonging to a pool.
   static constexpr uintptr_t kRegularPoolOffsetMask =
       static_cast<uintptr_t>(kRegularPoolSize) - 1;
@@ -247,26 +241,29 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   static constexpr uintptr_t kBRPPoolOffsetMask =
       static_cast<uintptr_t>(kBRPPoolSize) - 1;
   static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
-#endif  // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#endif  // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

   // This must be set to such a value that IsIn*Pool() always returns false when
   // the pool isn't initialized.
   static constexpr uintptr_t kUninitializedPoolBaseAddress =
       static_cast<uintptr_t>(-1);

-  struct PoolSetup {
+  struct GigaCageSetup {
     // Before PartitionAddressSpace::Init(), no allocation are allocated from a
     // reserved address space. Therefore, set *_pool_base_address_ initially to
     // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
-    constexpr PoolSetup()
+    constexpr GigaCageSetup()
         : regular_pool_base_address_(kUninitializedPoolBaseAddress),
           brp_pool_base_address_(kUninitializedPoolBaseAddress),
           configurable_pool_base_address_(kUninitializedPoolBaseAddress),
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
           regular_pool_base_mask_(0),
           brp_pool_base_mask_(0),
 #endif
-          configurable_pool_base_mask_(0) {
+          configurable_pool_base_mask_(0),
+          regular_pool_(0),
+          brp_pool_(0),
+          configurable_pool_(0) {
     }

     // Using a union to enforce padding.
@@ -275,30 +272,29 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
         uintptr_t regular_pool_base_address_;
         uintptr_t brp_pool_base_address_;
         uintptr_t configurable_pool_base_address_;
-#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
         uintptr_t regular_pool_base_mask_;
         uintptr_t brp_pool_base_mask_;
 #endif
         uintptr_t configurable_pool_base_mask_;
+        pool_handle regular_pool_;
+        pool_handle brp_pool_;
+        pool_handle configurable_pool_;
       };
       char one_cacheline_[kPartitionCachelineSize];
     };
   };
-  static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
-                "PoolSetup has to fill a cacheline(s)");
+  static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
+                "GigaCageSetup has to fill a cacheline(s)");

   // See the comment describing the address layout above.
   //
   // These are write-once fields, frequently accessed thereafter. Make sure they
   // don't share a cacheline with other, potentially writeable data, through
   // alignment and padding.
-  alignas(kPartitionCachelineSize) static PoolSetup setup_;
-
-#if defined(PA_ENABLE_SHADOW_METADATA)
-  static std::ptrdiff_t regular_pool_shadow_offset_;
-  static std::ptrdiff_t brp_pool_shadow_offset_;
-#endif
+  alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
 };

 PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
@@ -314,12 +310,6 @@ PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
   return PartitionAddressSpace::OffsetInBRPPool(address);
 }

-#if defined(PA_ENABLE_SHADOW_METADATA)
-PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
-  return PartitionAddressSpace::ShadowPoolOffset(pool);
-}
-#endif
-
 }  // namespace internal

 // Returns false for nullptr.
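
A stand-alone sketch (illustrative, assumes a 64-bit target, not from the diff) of the single AND-and-compare membership test used by IsInRegularPool()/IsInBRPPool() above, and of why a base of -1 (kUninitializedPoolBaseAddress) can never match.

#include <cassert>
#include <cstdint>

int main() {
  constexpr uintptr_t kPoolSize = uintptr_t{1} << 30;  // 1 GiB, power of two
  constexpr uintptr_t kPoolBaseMask = ~(kPoolSize - 1);

  uintptr_t pool_base = uintptr_t{42} << 30;  // any kPoolSize-aligned base
  uintptr_t inside = pool_base + 12345;
  uintptr_t outside = pool_base + kPoolSize;

  assert((inside & kPoolBaseMask) == pool_base);
  assert((outside & kPoolBaseMask) != pool_base);
  // `address & kPoolBaseMask` always has its low bits cleared, so it can never
  // equal an all-ones base address; that is why an uninitialized base of -1
  // makes the membership test return false for every address.
}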

View File

@@ -49,7 +49,6 @@ PA_ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
 #pragma optimize("", on)
 #endif

-#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
 // Used to memset() memory for debugging purposes only.
 PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
   // Only set the first 512kiB of the allocation. This is enough to detect uses
@@ -59,7 +58,6 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
   size_t size_to_memset = std::min(size, size_t{1} << 19);
   memset(ptr, value, size_to_memset);
 }
-#endif  // BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)

 // Returns true if we've hit the end of a random-length period. We don't want to
 // invoke `RandomValue` too often, because we call this function in a hot spot

View File

@@ -84,9 +84,3 @@ assert(enable_backup_ref_ptr_support || !enable_backup_ref_ptr_slow_checks,
 assert(
     enable_backup_ref_ptr_support || !enable_dangling_raw_ptr_checks,
     "Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")
-
-declare_args() {
-  enable_pkeys = is_linux && target_cpu == "x64"
-}
-
-assert(!enable_pkeys || (is_linux && target_cpu == "x64"),
-       "Pkeys are only supported on x64 linux")

View File

@@ -1,36 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_
-
-#include <stdint.h>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
-
-namespace partition_alloc::internal::base::ios {
-
-// Returns whether the operating system is iOS 12 or later.
-// TODO(crbug.com/1129482): Remove once minimum supported version is at least 12
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS12OrLater();
-
-// Returns whether the operating system is iOS 13 or later.
-// TODO(crbug.com/1129483): Remove once minimum supported version is at least 13
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS13OrLater();
-
-// Returns whether the operating system is iOS 14 or later.
-// TODO(crbug.com/1129484): Remove once minimum supported version is at least 14
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS14OrLater();
-
-// Returns whether the operating system is iOS 15 or later.
-// TODO(crbug.com/1227419): Remove once minimum supported version is at least 15
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) bool IsRunningOnIOS15OrLater();
-
-// Returns whether the operating system is at the given version or later.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC)
-bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix);
-
-}  // namespace partition_alloc::internal::base::ios
-
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_IOS_IOS_UTIL_H_

View File

@@ -1,57 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h"
-
-#include <array>
-
-#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
-
-namespace partition_alloc::internal::base::ios {
-
-bool IsRunningOnIOS12OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(12, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS13OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(13, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS14OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(14, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnIOS15OrLater() {
-  static const bool is_running_on_or_later = IsRunningOnOrLater(15, 0, 0);
-  return is_running_on_or_later;
-}
-
-bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
-  static const class OSVersion {
-   public:
-    OSVersion() {
-      SysInfo::OperatingSystemVersionNumbers(
-          &current_version_[0], &current_version_[1], &current_version_[2]);
-    }
-
-    bool IsRunningOnOrLater(int32_t version[3]) const {
-      for (size_t i = 0; i < std::size(current_version_); ++i) {
-        if (current_version_[i] != version[i])
-          return current_version_[i] > version[i];
-      }
-      return true;
-    }
-
-   private:
-    int32_t current_version_[3];
-  } kOSVersion;
-
-  int32_t version[3] = {major, minor, bug_fix};
-  return kOSVersion.IsRunningOnOrLater(version);
-}
-
-}  // namespace partition_alloc::internal::base::ios
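
A stand-alone sketch (illustrative, not from the deleted file) of the lexicographic {major, minor, bug_fix} comparison that IsRunningOnOrLater() performs above: the first differing component decides, and equal versions count as "or later".

#include <cassert>
#include <cstdint>

bool IsAtLeast(const int32_t current[3], const int32_t wanted[3]) {
  for (int i = 0; i < 3; ++i) {
    if (current[i] != wanted[i])
      return current[i] > wanted[i];  // first differing component decides
  }
  return true;  // identical versions count as "or later"
}

int main() {
  const int32_t current[3] = {15, 2, 1};
  const int32_t older[3] = {14, 7, 0};
  const int32_t newer[3] = {15, 3, 0};
  assert(IsAtLeast(current, older));
  assert(!IsAtLeast(current, newer));
}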

View File

@@ -67,13 +67,12 @@ int g_min_log_level = 0;
 // A log message handler that gets notified of every log message we process.
 LogMessageHandlerFunction g_log_message_handler = nullptr;

-#if !BUILDFLAG(IS_WIN)
-void WriteToStderr(const char* data, size_t length) {
+void WriteToFd(int fd, const char* data, size_t length) {
   size_t bytes_written = 0;
   int rv;
   while (bytes_written < length) {
     rv = PA_HANDLE_EINTR(
-        write(STDERR_FILENO, data + bytes_written, length - bytes_written));
+        write(fd, data + bytes_written, length - bytes_written));
     if (rv < 0) {
       // Give up, nothing we can do now.
       break;
@@ -81,22 +80,6 @@ void WriteToStderr(const char* data, size_t length) {
     bytes_written += rv;
   }
 }
-#else   // !BUILDFLAG(IS_WIN)
-void WriteToStderr(const char* data, size_t length) {
-  HANDLE handle = ::GetStdHandle(STD_ERROR_HANDLE);
-  const char* ptr = data;
-  const char* ptr_end = data + length;
-  while (ptr < ptr_end) {
-    DWORD bytes_written = 0;
-    if (!::WriteFile(handle, ptr, ptr_end - ptr, &bytes_written, nullptr) ||
-        bytes_written == 0) {
-      // Give up, nothing we can do now.
-      break;
-    }
-    ptr += bytes_written;
-  }
-}
-#endif  // !BUILDFLAG(IS_WIN)

 }  // namespace

@@ -262,15 +245,18 @@ ErrnoLogMessage::~ErrnoLogMessage() {

 void RawLog(int level, const char* message) {
   if (level >= g_min_log_level && message) {
-#if !BUILDFLAG(IS_WIN)
     const size_t message_len = strlen(message);
-#else   // !BUILDFLAG(IS_WIN)
-    const size_t message_len = ::lstrlenA(message);
-#endif  // !BUILDFLAG(IS_WIN)
-    WriteToStderr(message, message_len);
+    WriteToFd(STDERR_FILENO, message, message_len);

     if (message_len > 0 && message[message_len - 1] != '\n') {
-      WriteToStderr("\n", 1);
+      int rv;
+      do {
+        rv = PA_HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
+        if (rv < 0) {
+          // Give up, nothing we can do now.
+          break;
+        }
+      } while (rv != 1);
     }
   }
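
A stand-alone POSIX sketch (illustrative; PA_HANDLE_EINTR is replaced here by an explicit errno check) of the short-write/EINTR retry pattern that WriteToFd() implements above: write() may be interrupted by a signal or complete only partially, so both cases are retried.

#include <cerrno>
#include <cstddef>
#include <cstring>
#include <unistd.h>

void WriteAll(int fd, const char* data, size_t length) {
  size_t bytes_written = 0;
  while (bytes_written < length) {
    ssize_t rv = write(fd, data + bytes_written, length - bytes_written);
    if (rv < 0) {
      if (errno == EINTR)
        continue;  // interrupted before any byte was written: retry
      break;       // real error: give up, nothing we can do now
    }
    bytes_written += static_cast<size_t>(rv);  // advance past a short write
  }
}

int main() {
  const char msg[] = "hello from WriteAll\n";
  WriteAll(STDERR_FILENO, msg, strlen(msg));
}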

View File

@@ -7,8 +7,9 @@

 #include <AvailabilityMacros.h>
 #import <CoreGraphics/CoreGraphics.h>
+#include <stdint.h>

-#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
+#include <string>

 namespace partition_alloc::internal::base::mac {
@@ -18,7 +19,7 @@ namespace internal {
 // integer value. For example, for macOS Sierra this returns 1012, and for macOS
 // Big Sur it returns 1100. Note that the accuracy returned by this function is
 // as granular as the major version number of Darwin.
-PA_COMPONENT_EXPORT(PARTITION_ALLOC) int MacOSVersion();
+int MacOSVersion();

 }  // namespace internal
@@ -95,12 +96,6 @@ PA_DEFINE_IS_OS_FUNCS(12, PA_TEST_DEPLOYMENT_TARGET)
 PA_DEFINE_IS_OS_FUNCS(12, PA_IGNORE_DEPLOYMENT_TARGET)
 #endif

-#ifdef MAC_OS_VERSION_13_0
-PA_DEFINE_IS_OS_FUNCS(13, PA_TEST_DEPLOYMENT_TARGET)
-#else
-PA_DEFINE_IS_OS_FUNCS(13, PA_IGNORE_DEPLOYMENT_TARGET)
-#endif
-
 #undef PA_DEFINE_OLD_IS_OS_FUNCS_CR_MIN_REQUIRED
 #undef PA_DEFINE_OLD_IS_OS_FUNCS
 #undef PA_DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED
@@ -109,13 +104,6 @@ PA_DEFINE_IS_OS_FUNCS(13, PA_IGNORE_DEPLOYMENT_TARGET)
 #undef PA_TEST_DEPLOYMENT_TARGET
 #undef PA_IGNORE_DEPLOYMENT_TARGET

-// This should be infrequently used. It only makes sense to use this to avoid
-// codepaths that are very likely to break on future (unreleased, untested,
-// unborn) OS releases, or to log when the OS is newer than any known version.
-inline bool IsOSLaterThan13_DontCallThis() {
-  return !IsAtMostOS13();
-}
-
 }  // namespace partition_alloc::internal::base::mac

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MAC_MAC_UTIL_H_

View File

@ -10,50 +10,28 @@
#include <utility> #include <utility>
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {
// A tag type used for NoDestructor to allow it to be created for a type that
// has a trivial destructor. Use for cases where the same class might have
// different implementations that vary on destructor triviality or when the
// LSan hiding properties of NoDestructor are needed.
struct AllowForTriviallyDestructibleType;
// Helper type to create a function-local static variable of type `T` when `T` // A wrapper that makes it easy to create an object of type T with static
// has a non-trivial destructor. Storing a `T` in a `base::NoDestructor<T>` will // storage duration that:
// prevent `~T()` from running, even when the variable goes out of scope. // - is only constructed on first access
// - never invokes the destructor
// in order to satisfy the styleguide ban on global constructors and
// destructors.
// //
// Useful when a variable has static storage duration but its type has a // Runtime constant example:
// non-trivial destructor. Chromium bans global constructors and destructors: // const std::string& GetLineSeparator() {
// using a function-local static variable prevents the former, while using // // Forwards to std::string(size_t, char, const Allocator&) constructor.
// `base::NoDestructor<T>` prevents the latter. // static const base::NoDestructor<std::string> s(5, '-');
//
// ## Caveats
//
// - Must only be used as a function-local static variable. Declaring a global
// variable of type `base::NoDestructor<T>` will still generate a global
// constructor; declaring a local or member variable will lead to memory leaks
// or other surprising and undesirable behaviour.
//
// - If the data is rarely used, consider creating it on demand rather than
// caching it for the lifetime of the program. Though `base::NoDestructor<T>`
// does not heap allocate, the compiler still reserves space in bss for
// storing `T`, which costs memory at runtime.
//
// - If `T` is trivially destructible, do not use `base::NoDestructor<T>`:
//
// const uint64_t GetUnstableSessionSeed() {
// // No need to use `base::NoDestructor<T>` as `uint64_t` is trivially
// // destructible and does not require a global destructor.
// static const uint64_t kSessionSeed = base::RandUint64();
// return kSessionSeed;
// }
//
// ## Example Usage
//
// const std::string& GetDefaultText() {
// // Required since `static const std::string` requires a global destructor.
// static const base::NoDestructor<std::string> s("Hello world!");
// return *s; // return *s;
// } // }
// //
// More complex initialization using a lambda: // More complex initialization with a lambda:
// // const std::string& GetSessionNonce() {
// const std::string& GetRandomNonce() {
// // `nonce` is initialized with random data the first time this function is
// // called, but its value is fixed thereafter.
// static const base::NoDestructor<std::string> nonce([] { // static const base::NoDestructor<std::string> nonce([] {
// std::string s(16); // std::string s(16);
// crypto::RandString(s.data(), s.size()); // crypto::RandString(s.data(), s.size());
@ -62,24 +40,29 @@ namespace partition_alloc::internal::base {
// return *nonce; // return *nonce;
// } // }
// //
// ## Thread safety // NoDestructor<T> stores the object inline, so it also avoids a pointer
// indirection and a malloc. Also note that since C++11 static local variable
// initialization is thread-safe and so is this pattern. Code should prefer to
// use NoDestructor<T> over:
// - A function scoped static T* or T& that is dynamically initialized.
// - A global base::LazyInstance<T>.
// //
// Initialisation of function-local static variables is thread-safe since C++11. // Note that since the destructor is never run, this *will* leak memory if used
// The standard guarantees that: // as a stack or member variable. Furthermore, a NoDestructor<T> should never
// // have global scope as that may require a static initializer.
// - function-local static variables will be initialised the first time template <typename T, typename O = std::nullptr_t>
// execution passes through the declaration.
//
// - if another thread's execution concurrently passes through the declaration
// in the middle of initialisation, that thread will wait for the in-progress
// initialisation to complete.
template <typename T>
class NoDestructor { class NoDestructor {
public: public:
static_assert( static_assert(
!std::is_trivially_destructible_v<T>, !std::is_trivially_destructible<T>::value ||
"T is trivially destructible; please use a function-local static " std::is_same<O, AllowForTriviallyDestructibleType>::value,
"of type T directly instead"); "base::NoDestructor is not needed because the templated class has a "
"trivial destructor");
static_assert(std::is_same<O, AllowForTriviallyDestructibleType>::value ||
std::is_same<O, std::nullptr_t>::value,
"AllowForTriviallyDestructibleType is the only valid option "
"for the second template parameter of NoDestructor");
// Not constexpr; just write static constexpr T x = ...; if the value should // Not constexpr; just write static constexpr T x = ...; if the value should
// be a constexpr. // be a constexpr.
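Aside: both versions of this file implement the same underlying technique — aligned inline storage plus placement new, with no destructor ever registered. A minimal self-contained sketch of that idea (illustrative names, not Chromium's actual implementation):

#include <new>
#include <utility>

template <typename T>
class NoDestructorSketch {
 public:
  template <typename... Args>
  explicit NoDestructorSketch(Args&&... args) {
    // Construct T inside the inline buffer; no heap allocation happens.
    new (storage_) T(std::forward<Args>(args)...);
  }
  NoDestructorSketch(const NoDestructorSketch&) = delete;
  NoDestructorSketch& operator=(const NoDestructorSketch&) = delete;
  // No user-declared destructor, so ~T() is intentionally never invoked.

  T& operator*() { return *reinterpret_cast<T*>(storage_); }
  T* operator->() { return reinterpret_cast<T*>(storage_); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};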

View File

@ -1,29 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc::internal::base {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SysInfo {
public:
// Retrieves detailed numeric values for the OS version.
// DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
// for OS version-specific feature checks and workarounds. If you must use
// an OS version check instead of a feature check, use the base::mac::IsOS*
// family from base/mac/mac_util.h, or base::win::GetVersion from
// base/win/windows_version.h.
static void OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version);
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYSTEM_SYS_INFO_H_
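For context, a caller of the interface above passes three out-parameters; a hedged usage sketch (the IsAtLeastMajorVersion helper is hypothetical):

#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"

bool IsAtLeastMajorVersion(int32_t wanted_major) {
  int32_t major = 0;
  int32_t minor = 0;
  int32_t bugfix = 0;
  partition_alloc::internal::base::SysInfo::OperatingSystemVersionNumbers(
      &major, &minor, &bugfix);
  return major >= wanted_major;
}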

View File

@ -1,24 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
#import <Foundation/Foundation.h>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
namespace partition_alloc::internal::base {
// static
void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version) {
NSOperatingSystemVersion version =
[[NSProcessInfo processInfo] operatingSystemVersion];
*major_version = saturated_cast<int32_t>(version.majorVersion);
*minor_version = saturated_cast<int32_t>(version.minorVersion);
*bugfix_version = saturated_cast<int32_t>(version.patchVersion);
}
} // namespace partition_alloc::internal::base

View File

@ -1,24 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/system/sys_info.h"
#import <Foundation/Foundation.h>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
namespace partition_alloc::internal::base {
// static
void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version) {
NSOperatingSystemVersion version =
[[NSProcessInfo processInfo] operatingSystemVersion];
*major_version = saturated_cast<int32_t>(version.majorVersion);
*minor_version = saturated_cast<int32_t>(version.minorVersion);
*bugfix_version = saturated_cast<int32_t>(version.patchVersion);
}
} // namespace partition_alloc::internal::base

View File

@ -31,18 +31,18 @@ static_assert(sizeof(void*) != 8, "");
#endif #endif
#if defined(PA_HAS_64_BITS_POINTERS) && (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN)) #if defined(PA_HAS_64_BITS_POINTERS) && (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
// Allow PA to select an alternate pool size at run-time before initialization, // Use dynamically sized GigaCage. This allows querying the size at run-time,
// rather than using a single constexpr value. // before initialization, instead of using a hardcoded constexpr.
// //
// This is needed on iOS because iOS test processes can't handle large pools // This is needed on iOS because iOS test processes can't handle a large cage
// (see crbug.com/1250788). // (see crbug.com/1250788).
// //
// This is needed on Windows, because OS versions <8.1 incur commit charge even // This is needed on Windows, because OS versions <8.1 incur commit charge even
// on reserved address space, thus don't handle large pools well (see // on reserved address space, thus don't handle a large cage well (see
// crbug.com/1101421 and crbug.com/1217759). // crbug.com/1101421 and crbug.com/1217759).
// //
// This setting is specific to 64-bit, as 32-bit has a different implementation. // This setting is specific to 64-bit, as 32-bit has a different implementation.
#define PA_DYNAMICALLY_SELECT_POOL_SIZE #define PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE
#endif // defined(PA_HAS_64_BITS_POINTERS) && #endif // defined(PA_HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN)) // (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
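The pattern this macro toggles looks roughly like the sketch below — a runtime-selected global under the macro, a compile-time constant otherwise. Names and the 16 GiB figure are illustrative (and 64-bit only, as the text above notes), not taken from PartitionAlloc:

#include <cstddef>

constexpr size_t kGiBSketch = 1024u * 1024 * 1024;

#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// Chosen once at startup, before the allocator initializes.
inline size_t g_pool_size = 16 * kGiBSketch;
inline size_t PoolSize() { return g_pool_size; }
#else
constexpr size_t PoolSize() { return 16 * kGiBSketch; }
#endif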
@ -254,10 +254,10 @@ constexpr bool kUseLazyCommit = false;
// Enable shadow metadata. // Enable shadow metadata.
// //
// With this flag, shadow pools will be mapped, on which writable shadow // With this flag, a shadow GigaCage will be mapped, on which writable shadow
// metadata is placed, and the real metadata is set to read-only instead. // metadata is placed, and the real metadata is set to read-only instead.
// This feature is only enabled in 64-bit environments because pools work // This feature is only enabled with 64-bit CPUs because GigaCage does not
// differently with 32-bit pointers (see glossary). // exist with 32-bit CPUs.
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \ #if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
defined(PA_HAS_64_BITS_POINTERS) defined(PA_HAS_64_BITS_POINTERS)
#define PA_ENABLE_SHADOW_METADATA #define PA_ENABLE_SHADOW_METADATA

View File

@ -80,7 +80,7 @@ constexpr size_t kPartitionCachelineSize = 64;
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly // other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page. // up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64) #if defined(_MIPS_ARCH_LOONGSON)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() { PartitionPageShift() {
return 16; // 64 KiB return 16; // 64 KiB
@ -259,16 +259,18 @@ constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1; constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask; constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// PartitionAlloc's address space is split into pools. See `glossary.md`. // GigaCage is generally split into two pools, one which supports BackupRefPtr
// (BRP) and one that doesn't.
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
// The 3rd, Configurable Pool is only available in 64-bit mode.
constexpr size_t kNumPools = 3; constexpr size_t kNumPools = 3;
// Maximum pool size. With the exception of Configurable Pool, it is also // Maximum GigaCage pool size. With the exception of Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which // the actual size, unless PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE is set, which
// allows choosing a different size at initialization time for certain // allows choosing a different size at initialization time for certain
// configurations. // configurations.
// //
// Special-case Android and iOS, which incur test failures with larger // Special-case Android and iOS, which incur test failures with larger
// pools. Regardless, allocating >8GiB with malloc() on these platforms is // GigaCage. Regardless, allocating >8GiB with malloc() on these platforms is
// unrealistic as of 2022. // unrealistic as of 2022.
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = 8 * kGiB; constexpr size_t kPoolMaxSize = 8 * kGiB;
@ -316,7 +318,7 @@ constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size, // In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools. // because this is the reservation granularity of the GigaCage.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
return kSuperPageSize; return kSuperPageSize;
} }
@ -327,7 +329,7 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
#else // defined(PA_HAS_64_BITS_POINTERS) #else // defined(PA_HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system // In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation // allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps // unit. However, don't go below partition page size, so that GigaCage bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap. // don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularity() { DirectMapAllocationGranularity() {
@ -454,15 +456,11 @@ constexpr size_t kInvalidBucketSize = 1;
constexpr size_t kMac11MallocSizeHackRequestedSize = 32; constexpr size_t kMac11MallocSizeHackRequestedSize = 32;
// Usable size for allocations that require the hack. // Usable size for allocations that require the hack.
constexpr size_t kMac11MallocSizeHackUsableSize = constexpr size_t kMac11MallocSizeHackUsableSize =
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) || \ #if BUILDFLAG(PA_DCHECK_IS_ON)
defined(PA_REF_COUNT_STORE_REQUESTED_SIZE) || \
defined(PA_REF_COUNT_CHECK_COOKIE)
40; 40;
#else #else
44; 44;
#endif // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS) || #endif // BUILDFLAG(PA_DCHECK_IS_ON)
// defined(PA_REF_COUNT_STORE_REQUESTED_SIZE) ||
// defined(PA_REF_COUNT_CHECK_COOKIE)
#endif // defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK) #endif // defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK)
} // namespace internal } // namespace internal

View File

@ -45,17 +45,6 @@ namespace partition_alloc::internal {
namespace { namespace {
#if defined(PA_ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE uintptr_t ShadowMetadataStart(uintptr_t super_page,
pool_handle pool) {
uintptr_t shadow_metadata_start =
super_page + SystemPageSize() + ShadowPoolOffset(pool);
PA_DCHECK(!PartitionAddressSpace::IsInRegularPool(shadow_metadata_start));
PA_DCHECK(!PartitionAddressSpace::IsInBRPPool(shadow_metadata_start));
return shadow_metadata_start;
}
#endif
template <bool thread_safe> template <bool thread_safe>
[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure( [[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure(
PartitionRoot<thread_safe>* root, PartitionRoot<thread_safe>* root,
@ -97,8 +86,8 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
#endif // !defined(PA_HAS_64_BITS_POINTERS) && #endif // !defined(PA_HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// Reserves |requested_size| worth of super pages from the specified pool. // Reserves |requested_size| worth of super pages from the specified pool of the
// If the BRP pool is requested, this function honors the BRP block list. // GigaCage. If the BRP pool is requested, this function honors the BRP block list.
// //
// The returned address will be aligned to kSuperPageSize, and so // The returned address will be aligned to kSuperPageSize, and so
// |requested_address| should be. |requested_size| doesn't have to be, however. // |requested_address| should be. |requested_size| doesn't have to be, however.
@ -114,7 +103,7 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
// AreAllowedSuperPagesForBRPPool. // AreAllowedSuperPagesForBRPPool.
// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is // - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
// designed to not need locking. // designed to not need locking.
uintptr_t ReserveMemoryFromPool(pool_handle pool, uintptr_t ReserveMemoryFromGigaCage(pool_handle pool,
uintptr_t requested_address, uintptr_t requested_address,
size_t requested_size) { size_t requested_size) {
PA_DCHECK(!(requested_address % kSuperPageSize)); PA_DCHECK(!(requested_address % kSuperPageSize));
@ -126,7 +115,7 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
// allocation honors the block list. Find a better address otherwise. // allocation honors the block list. Find a better address otherwise.
#if !defined(PA_HAS_64_BITS_POINTERS) && \ #if !defined(PA_HAS_64_BITS_POINTERS) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) { if (pool == GetBRPPool()) {
constexpr int kMaxRandomAddressTries = 10; constexpr int kMaxRandomAddressTries = 10;
for (int i = 0; i < kMaxRandomAddressTries; ++i) { for (int i = 0; i < kMaxRandomAddressTries; ++i) {
if (!reserved_address || if (!reserved_address ||
@ -235,16 +224,16 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr; PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
PartitionPage<thread_safe>* page = nullptr; PartitionPage<thread_safe>* page = nullptr;
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
const PartitionTag tag = root->GetNewPartitionTag(); const PartitionTag tag = root->GetNewPartitionTag();
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
{ {
// Getting memory for direct-mapped allocations doesn't interact with the // Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several // rest of the allocator, but takes a long time, as it involves several
// system calls. Although no mmap() (or equivalent) calls are made on // system calls. With GigaCage, no mmap() (or equivalent) call is made on 64
// 64 bit systems, page permissions are changed with mprotect(), which is // bit systems, but page permissions are changed with mprotect(), which is a
// a syscall. // syscall.
// //
// These calls are almost always slow (at least a couple us per syscall on a // These calls are almost always slow (at least a couple us per syscall on a
// desktop Linux machine), and they also have a very long latency tail, // desktop Linux machine), and they also have a very long latency tail,
@ -277,15 +266,17 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PA_DCHECK(slot_size <= available_reservation_size); PA_DCHECK(slot_size <= available_reservation_size);
#endif #endif
// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool(); pool_handle pool = root->ChoosePool();
uintptr_t reservation_start; uintptr_t reservation_start;
{ {
// Reserving memory from the pool is actually not a syscall on 64 bit // Reserving memory from the GigaCage is actually not a syscall on 64 bit
// platforms. // platforms.
#if !defined(PA_HAS_64_BITS_POINTERS) #if !defined(PA_HAS_64_BITS_POINTERS)
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
#endif #endif
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size); reservation_start = ReserveMemoryFromGigaCage(pool, 0, reservation_size);
} }
if (PA_UNLIKELY(!reservation_start)) { if (PA_UNLIKELY(!reservation_start)) {
if (return_null) if (return_null)
@ -303,41 +294,23 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{ {
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
RecommitSystemPages(reservation_start + SystemPageSize(), RecommitSystemPages(
SystemPageSize(), reservation_start + SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
PageAccessibilityConfiguration::kRead,
#else
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityDisposition::kRequireUpdate);
}
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
// used, allocate a SystemPage for RefCount "bitmap" (only one of its // used, allocate 2 SystemPages, one for SuperPage metadata and the
// elements will be used). // other for RefCount "bitmap" (only one of its elements will be
if (pool == kBRPPoolHandle) { // used).
ScopedSyscallTimer timer{root}; (pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
RecommitSystemPages(reservation_start + SystemPageSize() * 2, #else
SystemPageSize(), SystemPageSize(),
#endif
PageAccessibilityConfiguration::kReadWrite, PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate); PageAccessibilityDisposition::kRequireUpdate);
} }
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(reservation_start, pool),
SystemPageSize(),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
// No need to hold root->lock_. Now that memory is reserved, no other // No need to hold root->lock_. Now that memory is reserved, no other
// overlapping region can be allocated (because of how pools work), // overlapping region can be allocated (because of how GigaCage works),
// so no other thread can update the same offset table entries at the // so no other thread can update the same offset table entries at the
// same time. Furthermore, nobody will read these offsets until this // same time. Furthermore, nobody will read these offsets until this
// function returns. // function returns.
@ -409,7 +382,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
SlotSpanMetadata<thread_safe>(&metadata->bucket); SlotSpanMetadata<thread_safe>(&metadata->bucket);
// It is typically possible to map a large range of inaccessible pages, and // It is typically possible to map a large range of inaccessible pages, and
// this is leveraged in multiple places, including the pools. However, // this is leveraged in multiple places, including the GigaCage. However,
// this doesn't mean that we can commit all this memory. For the vast // this doesn't mean that we can commit all this memory. For the vast
// majority of allocations, this just means that we crash in a slightly // majority of allocations, this just means that we crash in a slightly
// different place, but for callers ready to handle failures, we have to // different place, but for callers ready to handle failures, we have to
@ -449,9 +422,9 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
map_extent->padding_for_alignment = padding_for_alignment; map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &metadata->bucket; map_extent->bucket = &metadata->bucket;
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
DirectMapPartitionTagSetValue(slot_start, tag); DirectMapPartitionTagSetValue(slot_start, tag);
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
} }
root->lock_.AssertAcquired(); root->lock_.AssertAcquired();
@ -601,7 +574,7 @@ void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
slot_size = new_slot_size; slot_size = new_slot_size;
slot_size_reciprocal = kReciprocalMask / new_slot_size + 1; slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
active_slot_spans_head = active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const(); SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
empty_slot_spans_head = nullptr; empty_slot_spans_head = nullptr;
decommitted_slot_spans_head = nullptr; decommitted_slot_spans_head = nullptr;
num_full_slot_spans = 0; num_full_slot_spans = 0;
@ -729,8 +702,10 @@ uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPageSpan(
// page table bloat and not fragmenting address spaces in 32 bit // page table bloat and not fragmenting address spaces in 32 bit
// architectures. // architectures.
uintptr_t requested_address = root->next_super_page; uintptr_t requested_address = root->next_super_page;
// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool(); pool_handle pool = root->ChoosePool();
uintptr_t super_page_span_start = ReserveMemoryFromPool( uintptr_t super_page_span_start = ReserveMemoryFromGigaCage(
pool, requested_address, super_page_count * kSuperPageSize); pool, requested_address, super_page_count * kSuperPageSize);
if (PA_UNLIKELY(!super_page_span_start)) { if (PA_UNLIKELY(!super_page_span_start)) {
if (flags & AllocFlags::kReturnNull) if (flags & AllocFlags::kReturnNull)
@ -805,35 +780,20 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
// also a tiny amount of extent metadata. // also a tiny amount of extent metadata.
{ {
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(), RecommitSystemPages(super_page + SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
PageAccessibilityConfiguration::kRead,
#else
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityDisposition::kRequireUpdate);
}
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the
// used, allocate a SystemPage for RefCount bitmap. // BRP pool is used, allocate 2 SystemPages, one for
if (root->ChoosePool() == kBRPPoolHandle) { // SuperPage metadata and the other for RefCount bitmap.
ScopedSyscallTimer timer{root}; (root->ChoosePool() == GetBRPPool())
RecommitSystemPages(super_page + SystemPageSize() * 2, SystemPageSize(), ? SystemPageSize() * 2
PageAccessibilityConfiguration::kReadWrite, : SystemPageSize(),
PageAccessibilityDisposition::kRequireUpdate); #else
}
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(super_page, root->ChoosePool()),
SystemPageSize(), SystemPageSize(),
#endif
PageAccessibilityConfiguration::kReadWrite, PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate); PageAccessibilityDisposition::kRequireUpdate);
} }
#endif
// If we were after a specific address, but didn't get it, assume that // If we were after a specific address, but didn't get it, assume that
// the system chose a lousy address. Here most OS'es have a default // the system chose a lousy address. Here most OS'es have a default
@ -1002,10 +962,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable. // Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size); TagMemoryRangeRandomly(return_slot, slot_size);
} }
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(return_slot, slot_size, NormalBucketPartitionTagSetValue(return_slot, slot_size,
root->GetNewPartitionTag()); root->GetNewPartitionTag());
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// Add all slots that fit within so far committed pages to the free list. // Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr; PartitionFreelistEntry* prev_entry = nullptr;
@ -1022,10 +982,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// No MTE-tagging for larger slots, just cast. // No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot); next_slot_ptr = reinterpret_cast<void*>(next_slot);
} }
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(next_slot, slot_size, NormalBucketPartitionTagSetValue(next_slot, slot_size,
root->GetNewPartitionTag()); root->GetNewPartitionTag());
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr); auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) { if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry); PA_DCHECK(!prev_entry);
@ -1144,7 +1104,8 @@ bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
++num_full_slot_spans; ++num_full_slot_spans;
// Overflow. Most likely a correctness issue in the code. It is in theory // Overflow. Most likely a correctness issue in the code. It is in theory
// possible that the number of full slot spans really reaches (1 << 24), // possible that the number of full slot spans really reaches (1 << 24),
// but this is very unlikely (and not possible with most pool settings). // but this is very unlikely (and not possible with most GigaCage
// settings).
PA_CHECK(num_full_slot_spans); PA_CHECK(num_full_slot_spans);
// Not necessary but might help stop accidents. // Not necessary but might help stop accidents.
slot_span->next_slot_span = nullptr; slot_span->next_slot_span = nullptr;
@ -1170,7 +1131,7 @@ bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
} else { } else {
// Active list is now empty. // Active list is now empty.
active_slot_spans_head = active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const(); SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
} }
return usable_active_list_head; return usable_active_list_head;
@ -1218,7 +1179,7 @@ void PartitionBucket<thread_safe>::MaintainActiveList() {
if (!new_active_slot_spans_head) { if (!new_active_slot_spans_head) {
new_active_slot_spans_head = new_active_slot_spans_head =
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const(); SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
} }
active_slot_spans_head = new_active_slot_spans_head; active_slot_spans_head = new_active_slot_spans_head;
} }
@ -1317,13 +1278,7 @@ void PartitionBucket<thread_safe>::SortActiveSlotSpans() {
// Reverse order, since we insert at the head of the list. // Reverse order, since we insert at the head of the list.
for (int i = index - 1; i >= 0; i--) { for (int i = index - 1; i >= 0; i--) {
if (active_spans_array[i] ==
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
// The sentinel is const, don't try to write to it.
PA_DCHECK(active_slot_spans_head == nullptr);
} else {
active_spans_array[i]->next_slot_span = active_slot_spans_head; active_spans_array[i]->next_slot_span = active_slot_spans_head;
}
active_slot_spans_head = active_spans_array[i]; active_slot_spans_head = active_spans_array[i];
} }
} }

View File

@ -16,7 +16,7 @@ static constexpr size_t kCookieSize = 16;
// Cookie is enabled for debug builds. // Cookie is enabled for debug builds.
#if BUILDFLAG(PA_DCHECK_IS_ON) #if BUILDFLAG(PA_DCHECK_IS_ON)
inline constexpr unsigned char kCookieValue[kCookieSize] = { static constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
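To make concrete what the cookie buys in debug builds: the allocator stamps this pattern next to an allocation and re-checks it on free, so an out-of-bounds write is caught instead of silently corrupting metadata. A minimal sketch of that check (sketch names, not the shim's API):

#include <cstddef>
#include <cstring>

constexpr std::size_t kCookieSizeSketch = 16;
constexpr unsigned char kCookieValueSketch[kCookieSizeSketch] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};

// Stamp the pattern into the bytes adjacent to an allocation.
void WriteCookie(unsigned char* p) {
  std::memcpy(p, kCookieValueSketch, kCookieSizeSketch);
}

// Returns false if the cookie was clobbered, e.g. by a buffer overflow.
bool CheckCookie(const unsigned char* p) {
  return std::memcmp(p, kCookieValueSketch, kCookieSizeSketch) == 0;
}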

View File

@ -73,7 +73,7 @@ PA_ALWAYS_INLINE void PartitionDirectUnmap(
// This can create a fake "address space exhaustion" OOM, in the case where // This can create a fake "address space exhaustion" OOM, in the case where
// e.g. a large allocation is freed on a thread, and another large one is made // e.g. a large allocation is freed on a thread, and another large one is made
// from another *before* UnmapNow() has finished running. In this case the // from another *before* UnmapNow() has finished running. In this case the
// second one may not find enough space in the pool, and fail. This is // second one may not find enough space in the GigaCage, and fail. This is
// expected to be very rare though, and likely preferable to holding the lock // expected to be very rare though, and likely preferable to holding the lock
// while releasing the address space. // while releasing the address space.
ScopedUnlockGuard unlock{root->lock_}; ScopedUnlockGuard unlock{root->lock_};
@ -137,23 +137,16 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
} }
// static // static
template <bool thread_safe> template <bool thread_safe>
const SlotSpanMetadata<thread_safe> SlotSpanMetadata<thread_safe>
SlotSpanMetadata<thread_safe>::sentinel_slot_span_; SlotSpanMetadata<thread_safe>::sentinel_slot_span_;
// static // static
template <bool thread_safe> template <bool thread_safe>
const SlotSpanMetadata<thread_safe>* SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() { SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
return &sentinel_slot_span_; return &sentinel_slot_span_;
} }
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span_non_const() {
return const_cast<SlotSpanMetadata<thread_safe>*>(&sentinel_slot_span_);
}
template <bool thread_safe> template <bool thread_safe>
SlotSpanMetadata<thread_safe>::SlotSpanMetadata( SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
PartitionBucket<thread_safe>* bucket) PartitionBucket<thread_safe>* bucket)
@ -319,7 +312,7 @@ void UnmapNow(uintptr_t reservation_start,
#if BUILDFLAG(PA_DCHECK_IS_ON) #if BUILDFLAG(PA_DCHECK_IS_ON)
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) { if (pool == GetBRPPool()) {
// In 32-bit mode, the beginning of a reservation may be excluded from the // In 32-bit mode, the beginning of a reservation may be excluded from the
// BRP pool, so shift the pointer. Other pools don't have this logic. // BRP pool, so shift the pointer. Other pools don't have this logic.
PA_DCHECK(IsManagedByPartitionAllocBRPPool( PA_DCHECK(IsManagedByPartitionAllocBRPPool(
@ -334,8 +327,8 @@ void UnmapNow(uintptr_t reservation_start,
} else } else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{ {
PA_DCHECK(pool == kRegularPoolHandle || (IsConfigurablePoolAvailable() && PA_DCHECK(pool == GetRegularPool() ||
pool == kConfigurablePoolHandle)); (IsConfigurablePoolAvailable() && pool == GetConfigurablePool()));
// Non-BRP pools don't need adjustment that BRP needs in 32-bit mode. // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) || PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
IsManagedByPartitionAllocConfigurablePool(reservation_start)); IsManagedByPartitionAllocConfigurablePool(reservation_start));

View File

@ -296,13 +296,7 @@ struct SlotSpanMetadata {
// TODO(ajwong): Can this be made private? https://crbug.com/787153 // TODO(ajwong): Can this be made private? https://crbug.com/787153
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_COMPONENT_EXPORT(PARTITION_ALLOC)
static const SlotSpanMetadata* get_sentinel_slot_span(); static SlotSpanMetadata* get_sentinel_slot_span();
// The sentinel is not supposed to be modified and hence we mark it as const
// under the hood. However, we often store it together with mutable metadata
// objects and need a non-const pointer.
// You can use this function for this case, but you need to ensure that the
// returned object will not be written to.
static SlotSpanMetadata* get_sentinel_slot_span_non_const();
// Slot span state getters. // Slot span state getters.
PA_ALWAYS_INLINE bool is_active() const; PA_ALWAYS_INLINE bool is_active() const;
@ -322,7 +316,7 @@ struct SlotSpanMetadata {
// //
// Note, this declaration is kept in the header as opposed to an anonymous // Note, this declaration is kept in the header as opposed to an anonymous
// namespace so the getter can be fully inlined. // namespace so the getter can be fully inlined.
static const SlotSpanMetadata sentinel_slot_span_; static SlotSpanMetadata sentinel_slot_span_;
// For the sentinel. // For the sentinel.
constexpr SlotSpanMetadata() noexcept constexpr SlotSpanMetadata() noexcept
: marked_full(0), : marked_full(0),

View File

@ -8,7 +8,6 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -21,6 +20,10 @@
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@ -216,18 +219,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
return alive; return alive;
} }
// Called when a raw_ptr is not banning dangling ptrs, but the user still
// wants to ensure the pointer is not currently dangling. This is currently
// used in UnretainedWrapper to make sure callbacks are not invoked with
// dangling pointers. If such a raw_ptr exists but the allocation is no longer
// alive, then we have a dangling pointer to a dead object.
PA_ALWAYS_INLINE void ReportIfDangling() {
if (!IsAlive()) {
partition_alloc::internal::UnretainedDanglingRawPtrDetected(
reinterpret_cast<uintptr_t>(this));
}
}
// GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
// make sure the `raw_ptr<T>` release operation will never attempt to call the // make sure the `raw_ptr<T>` release operation will never attempt to call the
// PA `free` on such a slot. GWP-ASan takes the extra reference into account // PA `free` on such a slot. GWP-ASan takes the extra reference into account

View File

@ -683,7 +683,7 @@ template <bool thread_safe>
#endif // #if !defined(ARCH_CPU_64_BITS) #endif // #if !defined(ARCH_CPU_64_BITS)
// Out of memory can be due to multiple causes, such as: // Out of memory can be due to multiple causes, such as:
// - Out of virtual address space in the desired pool // - Out of GigaCage virtual address space
// - Out of commit due to either our process, or another one // - Out of commit due to either our process, or another one
// - Excessive allocations in the current process // - Excessive allocations in the current process
// //
@ -831,8 +831,7 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// We mark the sentinel slot span as free to make sure it is skipped by our // We mark the sentinel slot span as free to make sure it is skipped by our
// logic to find a new active slot span. // logic to find a new active slot span.
memset(&sentinel_bucket, 0, sizeof(sentinel_bucket)); memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
sentinel_bucket.active_slot_spans_head = sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
SlotSpan::get_sentinel_slot_span_non_const();
// This is a "magic" value so we can test if a root pointer is valid. // This is a "magic" value so we can test if a root pointer is valid.
inverted_self = ~reinterpret_cast<uintptr_t>(this); inverted_self = ~reinterpret_cast<uintptr_t>(this);
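The inverted_self assignment at the end is a cheap validity check: storing the bitwise complement of the object's own address lets later code test whether a pointer refers to an initialized root. A sketch of the idea (illustrative, not the exact PartitionRoot code):

#include <cstdint>

struct RootSketch {
  uintptr_t inverted_self = 0;
  void Init() { inverted_self = ~reinterpret_cast<uintptr_t>(this); }
  bool IsValid() const {
    // Holds only if Init() ran on this object at this exact address.
    return ~inverted_self == reinterpret_cast<uintptr_t>(this);
  }
};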

View File

@ -389,10 +389,10 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); } explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
~PartitionRoot(); ~PartitionRoot();
// This will unreserve any space in the pool that the PartitionRoot is // This will unreserve any space in the GigaCage that the PartitionRoot is
// using. This is needed because many tests create and destroy many // using. This is needed because many tests create and destroy many
// PartitionRoots over the lifetime of a process, which can exhaust the // PartitionRoots over the lifetime of a process, which can exhaust the
// pool and cause tests to fail. // GigaCage and cause tests to fail.
void DestructForTesting(); void DestructForTesting();
#if defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK) #if defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK)
@ -632,14 +632,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
internal::pool_handle ChoosePool() const { internal::pool_handle ChoosePool() const {
if (flags.use_configurable_pool) { if (flags.use_configurable_pool) {
PA_DCHECK(IsConfigurablePoolAvailable()); return internal::GetConfigurablePool();
return internal::kConfigurablePoolHandle;
} }
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return brp_enabled() ? internal::kBRPPoolHandle return brp_enabled() ? internal::GetBRPPool() : internal::GetRegularPool();
: internal::kRegularPoolHandle;
#else #else
return internal::kRegularPoolHandle; return internal::GetRegularPool();
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} }
@ -1173,8 +1171,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
// Returns whether MTE is supported for this partition root. Because MTE stores // Returns whether MTE is supported for this partition root. Because MTE stores
// tagging information in the high bits of the pointer, it causes issues with // tagging information in the high bits of the pointer, it causes issues with
// components like V8's ArrayBuffers which use custom pointer representations. // components like V8's ArrayBuffers which use custom pointer representations.
// All custom representations encountered so far rely on an "is in configurable // All custom representations encountered so far rely on a caged memory address
// pool?" check, so we use that as a proxy. // area / configurable pool, so we use that as a proxy.
template <bool thread_safe> template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled() PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
const { const {
@ -1197,7 +1195,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
uintptr_t object_addr = internal::ObjectPtr2Addr(object); uintptr_t object_addr = internal::ObjectPtr2Addr(object);
// On Android, malloc() interception is more fragile than on other // On Android, malloc() interception is more fragile than on other
// platforms, as we use wrapped symbols. However, the pools allow us to // platforms, as we use wrapped symbols. However, the GigaCage allows us to
// quickly tell that a pointer was allocated with PartitionAlloc. // quickly tell that a pointer was allocated with PartitionAlloc.
// //
// This is a crash to detect imperfect symbol interception. However, we can // This is a crash to detect imperfect symbol interception. However, we can

View File

@ -60,10 +60,10 @@ static constexpr uint16_t kOffsetTagNormalBuckets =
// //
// *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used // *) In 32-bit mode, Y is not used by PartitionAlloc, and cannot be used
// until X is unreserved, because PartitionAlloc always uses kSuperPageSize // until X is unreserved, because PartitionAlloc always uses kSuperPageSize
// alignment when reserving address spaces. One can use an "is in pool?" check // alignment when reserving address spaces. One can use "GigaCage" to
// to further determine which part of the super page is used by // further determine which part of the super page is used by PartitionAlloc.
// PartitionAlloc. This isn't a problem in 64-bit mode, where allocation // This isn't a problem in 64-bit mode, where allocation granularity is
// granularity is kSuperPageSize. // kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable { class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
public: public:
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
@ -81,7 +81,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets, static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets."); "Offsets should be smaller than kOffsetTagNormalBuckets.");
static PA_CONSTINIT struct _ReservationOffsetTable { static struct _ReservationOffsetTable {
// The number of table elements is less than MAX_UINT16, so the element type // The number of table elements is less than MAX_UINT16, so the element type
// can be uint16_t. // can be uint16_t.
static_assert( static_assert(

View File

@ -10,13 +10,18 @@ include_rules = [
"+base/allocator/buildflags.h", "+base/allocator/buildflags.h",
"+base/allocator/early_zone_registration_mac.h", "+base/allocator/early_zone_registration_mac.h",
"+base/allocator/partition_alloc_features.h", "+base/allocator/partition_alloc_features.h",
"+base/allocator/partition_allocator/partition_alloc_base",
"+base/base_export.h", "+base/base_export.h",
"+base/bind.h", "+base/bind.h",
"+base/compiler_specific.h",
"+base/feature_list.h",
"+base/ios/ios_util.h",
"+base/logging.h", "+base/logging.h",
"+base/mac/mac_util.h",
"+base/mac/mach_logging.h", "+base/mac/mach_logging.h",
"+base/memory/nonscannable_memory.h", "+base/memory/nonscannable_memory.h",
"+base/memory/page_size.h", "+base/memory/page_size.h",
"+base/numerics/checked_math.h",
"+base/numerics/safe_conversions.h",
"+base/process/memory.h", "+base/process/memory.h",
"+base/synchronization/lock.h", "+base/synchronization/lock.h",
"+base/threading/platform_thread.h", "+base/threading/platform_thread.h",
@ -30,6 +35,7 @@ include_rules = [
specific_include_rules = { specific_include_rules = {
"allocator_shim_unittest\.cc$": [ "allocator_shim_unittest\.cc$": [
"+base/mac/mac_util.h",
"+base/synchronization/waitable_event.h", "+base/synchronization/waitable_event.h",
"+base/threading/thread_local.h", "+base/threading/thread_local.h",
], ],

View File

@ -40,9 +40,9 @@
#include "third_party/apple_apsl/CFBase.h" #include "third_party/apple_apsl/CFBase.h"
#if BUILDFLAG(IS_IOS) #if BUILDFLAG(IS_IOS)
#include "base/allocator/partition_allocator/partition_alloc_base/ios/ios_util.h" #include "base/ios/ios_util.h"
#else #else
#include "base/allocator/partition_allocator/partition_alloc_base/mac/mac_util.h" #include "base/mac/mac_util.h"
#endif #endif
namespace allocator_shim { namespace allocator_shim {
@ -237,9 +237,9 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
bool CanGetContextForCFAllocator() { bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS) #if BUILDFLAG(IS_IOS)
return !partition_alloc::internal::base::ios::IsRunningOnOrLater(17, 0, 0); return !base::ios::IsRunningOnOrLater(17, 0, 0);
#else #else
return !partition_alloc::internal::base::mac::IsOSLaterThan13_DontCallThis(); return !base::mac::IsOSLaterThan13_DontCallThis();
#endif #endif
} }

View File

@ -4,9 +4,9 @@
#include <limits> #include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/shim/allocator_shim.h" #include "base/allocator/partition_allocator/shim/allocator_shim.h"
#include "base/compiler_specific.h"
#include "base/numerics/checked_math.h"
#include "base/process/memory.h" #include "base/process/memory.h"
#include <dlfcn.h> #include <dlfcn.h>
@ -36,7 +36,7 @@ constexpr size_t kMaxAllowedSize = std::numeric_limits<int>::max() - (1 << 12);
void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) { void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
// Cannot force glibc's malloc() to crash when a large size is requested, do // Cannot force glibc's malloc() to crash when a large size is requested, do
// it in the shim instead. // it in the shim instead.
if (PA_UNLIKELY(size >= kMaxAllowedSize)) if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size); base::TerminateBecauseOutOfMemory(size);
return __libc_malloc(size); return __libc_malloc(size);
@ -45,7 +45,7 @@ void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
void* GlibcUncheckedMalloc(const AllocatorDispatch*, void* GlibcUncheckedMalloc(const AllocatorDispatch*,
size_t size, size_t size,
void* context) { void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) if (UNLIKELY(size >= kMaxAllowedSize))
return nullptr; return nullptr;
return __libc_malloc(size); return __libc_malloc(size);
@ -55,8 +55,8 @@ void* GlibcCalloc(const AllocatorDispatch*,
size_t n, size_t n,
size_t size, size_t size,
void* context) { void* context) {
const auto total = partition_alloc::internal::base::CheckMul(n, size); const auto total = base::CheckMul(n, size);
if (PA_UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize)) if (UNLIKELY(!total.IsValid() || total.ValueOrDie() >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size * n); base::TerminateBecauseOutOfMemory(size * n);
return __libc_calloc(n, size); return __libc_calloc(n, size);
@ -66,7 +66,7 @@ void* GlibcRealloc(const AllocatorDispatch*,
void* address, void* address,
size_t size, size_t size,
void* context) { void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size); base::TerminateBecauseOutOfMemory(size);
return __libc_realloc(address, size); return __libc_realloc(address, size);
@ -76,7 +76,7 @@ void* GlibcMemalign(const AllocatorDispatch*,
size_t alignment, size_t alignment,
size_t size, size_t size,
void* context) { void* context) {
if (PA_UNLIKELY(size >= kMaxAllowedSize)) if (UNLIKELY(size >= kMaxAllowedSize))
base::TerminateBecauseOutOfMemory(size); base::TerminateBecauseOutOfMemory(size);
return __libc_memalign(alignment, size); return __libc_memalign(alignment, size);
@ -86,7 +86,7 @@ void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
__libc_free(address); __libc_free(address);
} }
PA_NO_SANITIZE("cfi-icall") NO_SANITIZE("cfi-icall")
size_t GlibcGetSizeEstimate(const AllocatorDispatch*, size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
void* address, void* address,
void* context) { void* context) {

View File

@ -16,10 +16,7 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h" #include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h" #include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h" #include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
@ -27,7 +24,11 @@
#include "base/allocator/partition_allocator/partition_root.h" #include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h" #include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h" #include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h" #include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "build/chromecast_buildflags.h" #include "build/chromecast_buildflags.h"
@ -77,9 +78,9 @@ class LeakySingleton {
public: public:
constexpr LeakySingleton() = default; constexpr LeakySingleton() = default;
PA_ALWAYS_INLINE T* Get() { ALWAYS_INLINE T* Get() {
auto* instance = instance_.load(std::memory_order_acquire); auto* instance = instance_.load(std::memory_order_acquire);
if (PA_LIKELY(instance)) if (LIKELY(instance))
return instance; return instance;
return GetSlowPath(); return GetSlowPath();
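The Get() fast path above is the standard atomic lazy-initialization pattern: one acquire load, with a slow path that constructs into storage that is deliberately never destroyed and publishes the pointer with release semantics. A condensed, self-contained sketch under those assumptions (not the full shim code):

#include <atomic>
#include <mutex>
#include <new>

template <typename T>
class LeakySingletonSketch {
 public:
  constexpr LeakySingletonSketch() = default;

  T* Get() {
    T* instance = instance_.load(std::memory_order_acquire);
    if (instance)
      return instance;  // Fast path: a single acquire load.
    return GetSlowPath();
  }

 private:
  T* GetSlowPath() {
    // The first caller constructs T in the inline buffer; the object is
    // never destroyed (intentionally leaky) and is published with release
    // semantics so the fast path's acquire load sees it fully constructed.
    std::call_once(once_, [this] {
      instance_.store(new (storage_) T(), std::memory_order_release);
    });
    return instance_.load(std::memory_order_acquire);
  }

  std::atomic<T*> instance_{nullptr};
  std::once_flag once_;
  alignas(T) unsigned char storage_[sizeof(T)] = {};
};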
@ -176,7 +177,7 @@ class MainPartitionConstructor {
LeakySingleton<partition_alloc::ThreadSafePartitionRoot, LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
MainPartitionConstructor> MainPartitionConstructor>
g_root PA_CONSTINIT = {}; g_root CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* Allocator() { partition_alloc::ThreadSafePartitionRoot* Allocator() {
return g_root.Get(); return g_root.Get();
} }
@ -193,7 +194,7 @@ class AlignedPartitionConstructor {
LeakySingleton<partition_alloc::ThreadSafePartitionRoot, LeakySingleton<partition_alloc::ThreadSafePartitionRoot,
AlignedPartitionConstructor> AlignedPartitionConstructor>
g_aligned_root PA_CONSTINIT = {}; g_aligned_root CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() { partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() {
return g_original_root.load(std::memory_order_relaxed); return g_original_root.load(std::memory_order_relaxed);
@ -231,7 +232,7 @@ size_t g_extra_bytes;
#endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86) #endif // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
// TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed. // TODO(brucedawson): Remove this when https://crbug.com/1151455 is fixed.
PA_ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) { ALWAYS_INLINE size_t MaybeAdjustSize(size_t size) {
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86) #if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
return base::CheckAdd(size, g_extra_bytes).ValueOrDie(); return base::CheckAdd(size, g_extra_bytes).ValueOrDie();
#else // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86) #else // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86)
@ -319,9 +320,7 @@ void* PartitionCalloc(const AllocatorDispatch*,
size_t size, size_t size,
void* context) { void* context) {
partition_alloc::ScopedDisallowAllocations guard{}; partition_alloc::ScopedDisallowAllocations guard{};
const size_t total = const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
partition_alloc::internal::base::CheckMul(n, MaybeAdjustSize(size))
.ValueOrDie();
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total, partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
partition_alloc::PartitionPageSize()); partition_alloc::PartitionPageSize());
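The CheckMul in PartitionCalloc above is what blocks the classic calloc overflow, where n * size wraps around and under-allocates. The same guard written in plain standard C++ (base::CheckMul is the Chromium helper; this sketch returns null where ValueOrDie() would crash):

#include <cstddef>
#include <cstdlib>
#include <limits>

// Refuses the request instead of under-allocating when n * size overflows.
void* CheckedCallocSketch(std::size_t n, std::size_t size) {
  if (size != 0 && n > std::numeric_limits<std::size_t>::max() / size)
    return nullptr;
  return std::calloc(n, size);
}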
@ -387,7 +386,7 @@ void* PartitionRealloc(const AllocatorDispatch*,
void* context) { void* context) {
partition_alloc::ScopedDisallowAllocations guard{}; partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(address)) && reinterpret_cast<uintptr_t>(address)) &&
address)) { address)) {
// A memory region allocated by the system allocator is passed in this // A memory region allocated by the system allocator is passed in this
@ -412,7 +411,7 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
partition_alloc::ScopedDisallowAllocations guard{}; partition_alloc::ScopedDisallowAllocations guard{};
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
// TODO(bartekn): Add MTE unmasking here (and below). // TODO(bartekn): Add MTE unmasking here (and below).
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) && reinterpret_cast<uintptr_t>(object)) &&
object)) { object)) {
// A memory region allocated by the system allocator is passed in this // A memory region allocated by the system allocator is passed in this
@ -427,7 +426,7 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
// the pointer, pass it along. This should not have a runtime cost vs regular // the pointer, pass it along. This should not have a runtime cost vs regular
// Android, since on Android we have a PA_CHECK() rather than the branch here. // Android, since on Android we have a PA_CHECK() rather than the branch here.
#if BUILDFLAG(IS_CAST_ANDROID) #if BUILDFLAG(IS_CAST_ANDROID)
if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( if (UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
reinterpret_cast<uintptr_t>(object)) && reinterpret_cast<uintptr_t>(object)) &&
object)) { object)) {
// A memory region allocated by the system allocator is passed in this // A memory region allocated by the system allocator is passed in this
@@ -789,22 +788,19 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
   info.arena = 0;  // Memory *not* allocated with mmap().
   // Memory allocated with mmap(), aka virtual size.
-  info.hblks =
-      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
+  info.hblks = base::checked_cast<decltype(info.hblks)>(
       allocator_dumper.stats().total_mmapped_bytes +
       aligned_allocator_dumper.stats().total_mmapped_bytes +
       nonscannable_allocator_dumper.stats().total_mmapped_bytes +
       nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
   // Resident bytes.
-  info.hblkhd =
-      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
+  info.hblkhd = base::checked_cast<decltype(info.hblkhd)>(
       allocator_dumper.stats().total_resident_bytes +
       aligned_allocator_dumper.stats().total_resident_bytes +
       nonscannable_allocator_dumper.stats().total_resident_bytes +
       nonquarantinable_allocator_dumper.stats().total_resident_bytes);
   // Allocated bytes.
-  info.uordblks =
-      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
+  info.uordblks = base::checked_cast<decltype(info.uordblks)>(
       allocator_dumper.stats().total_active_bytes +
       aligned_allocator_dumper.stats().total_active_bytes +
       nonscannable_allocator_dumper.stats().total_active_bytes +
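Both sides of the PartitionCalloc hunk above compute the total allocation size with an overflow-checked multiply; the change only moves CheckMul() between namespaces. A minimal sketch of the pattern using the plain base/numerics spelling from the right-hand column (the function name here is illustrative, not from the diff):

#include <cstddef>

#include "base/numerics/checked_math.h"

// Sketch: calloc-style n * size that CHECK-crashes on overflow instead of
// silently wrapping. CheckMul() returns a CheckedNumeric; ValueOrDie()
// extracts the value only if every intermediate step stayed in range.
size_t CheckedTotalSize(size_t n, size_t size) {
  return base::CheckMul(n, size).ValueOrDie();
}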


@@ -15,8 +15,8 @@
 #include <new>

-#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
 #include "base/allocator/partition_allocator/shim/allocator_shim_internals.h"
+#include "base/compiler_specific.h"
 #include "build/build_config.h"

 #if !BUILDFLAG(IS_APPLE)
@@ -28,7 +28,7 @@
 // it is also needless, since no library used on macOS imports these.
 //
 // TODO(lizeb): It may not be necessary anywhere to export these.
-#define SHIM_CPP_SYMBOLS_EXPORT PA_NOINLINE
+#define SHIM_CPP_SYMBOLS_EXPORT NOINLINE
 #endif

 SHIM_CPP_SYMBOLS_EXPORT void* operator new(size_t size) {


@@ -16,8 +16,8 @@
 #include <limits>

 #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
-#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/numerics/safe_conversions.h"

 namespace allocator_shim {
@@ -122,8 +122,7 @@ void* AlignAllocation(void* ptr, size_t alignment) {
   // Write the prefix.
   AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
-  prefix->original_allocation_offset =
-      partition_alloc::internal::base::checked_cast<unsigned int>(
+  prefix->original_allocation_offset = base::checked_cast<unsigned int>(
       address - reinterpret_cast<uintptr_t>(ptr));
 #if BUILDFLAG(PA_DCHECK_IS_ON)
   prefix->magic = AlignedPrefix::kMagic;


@@ -765,10 +765,10 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
  private:
 #if defined(PA_HAS_64_BITS_POINTERS)
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
+  PA_ALWAYS_INLINE static uintptr_t CageBase() {
     return PartitionAddressSpace::RegularPoolBase();
   }
-  PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
+  PA_ALWAYS_INLINE static uintptr_t CageMask() {
     return PartitionAddressSpace::RegularPoolBaseMask();
   }
 #endif  // defined(PA_HAS_64_BITS_POINTERS)
@@ -1274,7 +1274,7 @@ PCScanInternal::~PCScanInternal() = default;
 void PCScanInternal::Initialize(PCScan::InitConfig config) {
   PA_DCHECK(!is_initialized_);
 #if defined(PA_HAS_64_BITS_POINTERS)
-  // Make sure that pools are initialized.
+  // Make sure that GigaCage is initialized.
   PartitionAddressSpace::Init();
 #endif
   CommitCardTable();


@@ -38,7 +38,7 @@
 namespace partition_alloc::internal {

 // Iterates over range of memory using the best available SIMD extension.
-// Assumes that 64bit platforms have pool support and the begin pointer of
+// Assumes that 64bit platforms have cage support and the begin pointer of
 // incoming ranges are properly aligned. The class is designed around the CRTP
 // version of the "template method" (in GoF terms). CRTP is needed for fast
 // static dispatch.
@@ -75,8 +75,7 @@ class ScanLoop {
 template <typename Derived>
 void ScanLoop<Derived>::Run(uintptr_t begin, uintptr_t end) {
   // We allow vectorization only for 64bit since they require support of the
-  // 64bit regular pool, and only for x86 because a special instruction set is
-  // required.
+  // 64bit cage, and only for x86 because a special instruction set is required.
 #if defined(ARCH_CPU_X86_64)
   if (simd_type_ == SimdSupport::kAVX2)
     return RunAVX2(begin, end);
@@ -96,8 +95,8 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
 #if defined(PA_HAS_64_BITS_POINTERS)
   // If the read value is a pointer into the PA region, it's likely
   // MTE-tagged. Piggyback on |mask| to untag, for efficiency.
-  const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
-  const uintptr_t base = Derived::RegularPoolBase();
+  const uintptr_t mask = Derived::CageMask() & kPtrUntagMask;
+  const uintptr_t base = Derived::CageBase();
 #endif
   for (; begin < end; begin += sizeof(uintptr_t)) {
     // Read the region word-by-word. Everything that we read is a potential
@@ -129,24 +128,24 @@ __attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
   // example, according to the Intel docs, on Broadwell and Haswell the CPI of
   // vmovdqa (_mm256_load_si256) is twice smaller (0.25) than that of vmovapd
   // (_mm256_load_pd).
-  const __m256i vbase = _mm256_set1_epi64x(derived().RegularPoolBase());
+  const __m256i vbase = _mm256_set1_epi64x(derived().CageBase());
   // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const __m256i regular_pool_mask =
-      _mm256_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
+  // MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
+  const __m256i cage_mask =
+      _mm256_set1_epi64x(derived().CageMask() & kPtrUntagMask);
   static_assert(sizeof(__m256i) == kBytesInVector);
   for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
     // Keep it MTE-untagged. See DisableMTEScope for details.
     const __m256i maybe_ptrs =
         _mm256_load_si256(reinterpret_cast<__m256i*>(begin));
-    const __m256i vand = _mm256_and_si256(maybe_ptrs, regular_pool_mask);
+    const __m256i vand = _mm256_and_si256(maybe_ptrs, cage_mask);
     const __m256i vcmp = _mm256_cmpeq_epi64(vand, vbase);
     const int mask = _mm256_movemask_pd(_mm256_castsi256_pd(vcmp));
     if (PA_LIKELY(!mask))
       continue;
     // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break in-pool assumption checked above.
+    // Otherwise, new loads can break in-cage assumption checked above.
     if (mask & 0b0001)
       derived().CheckPointer(_mm256_extract_epi64(maybe_ptrs, 0));
     if (mask & 0b0010)
@@ -168,24 +167,24 @@ __attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
   static constexpr size_t kWordsInVector = 2;
   static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
   PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
-  const __m128i vbase = _mm_set1_epi64x(derived().RegularPoolBase());
+  const __m128i vbase = _mm_set1_epi64x(derived().CageBase());
   // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const __m128i regular_pool_mask =
-      _mm_set1_epi64x(derived().RegularPoolMask() & kPtrUntagMask);
+  // MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
+  const __m128i cage_mask =
+      _mm_set1_epi64x(derived().CageMask() & kPtrUntagMask);
   static_assert(sizeof(__m128i) == kBytesInVector);
   for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
     // Keep it MTE-untagged. See DisableMTEScope for details.
     const __m128i maybe_ptrs =
         _mm_loadu_si128(reinterpret_cast<__m128i*>(begin));
-    const __m128i vand = _mm_and_si128(maybe_ptrs, regular_pool_mask);
+    const __m128i vand = _mm_and_si128(maybe_ptrs, cage_mask);
     const __m128i vcmp = _mm_cmpeq_epi64(vand, vbase);
     const int mask = _mm_movemask_pd(_mm_castsi128_pd(vcmp));
     if (PA_LIKELY(!mask))
       continue;
     // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break in-pool assumption checked above.
+    // Otherwise, new loads can break in-cage assumption checked above.
     if (mask & 0b01) {
       derived().CheckPointer(_mm_cvtsi128_si64(maybe_ptrs));
     }
@@ -209,22 +208,22 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
   static constexpr size_t kWordsInVector = 2;
   static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
   PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
-  const uint64x2_t vbase = vdupq_n_u64(derived().RegularPoolBase());
+  const uint64x2_t vbase = vdupq_n_u64(derived().CageBase());
   // If the read value is a pointer into the PA region, it's likely
-  // MTE-tagged. Piggyback on |regular_pool_mask| to untag, for efficiency.
-  const uint64x2_t regular_pool_mask =
-      vdupq_n_u64(derived().RegularPoolMask() & kPtrUntagMask);
+  // MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
+  const uint64x2_t cage_mask =
+      vdupq_n_u64(derived().CageMask() & kPtrUntagMask);
   for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
     // Keep it MTE-untagged. See DisableMTEScope for details.
     const uint64x2_t maybe_ptrs = vld1q_u64(reinterpret_cast<uint64_t*>(begin));
-    const uint64x2_t vand = vandq_u64(maybe_ptrs, regular_pool_mask);
+    const uint64x2_t vand = vandq_u64(maybe_ptrs, cage_mask);
     const uint64x2_t vcmp = vceqq_u64(vand, vbase);
     const uint32_t max = vmaxvq_u32(vreinterpretq_u32_u64(vcmp));
     if (PA_LIKELY(!max))
       continue;
     // It's important to extract pointers from the already loaded vector.
-    // Otherwise, new loads can break in-pool assumption checked above.
+    // Otherwise, new loads can break in-cage assumption checked above.
     if (vgetq_lane_u64(vcmp, 0))
       derived().CheckPointer(vgetq_lane_u64(maybe_ptrs, 0));
     if (vgetq_lane_u64(vcmp, 1))
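All three vectorized loops above apply the same predicate that RunUnvectorized spells out word-by-word: a value can point into the PartitionAlloc reservation only if masking it with the pool (cage) mask reproduces the pool base, with the mask also stripping MTE tag bits. A scalar sketch of that check, with illustrative parameter names rather than the real PartitionAddressSpace accessors:

#include <cstdint>

// Sketch: `word` is a candidate PA pointer iff (word & mask) == base.
// kPtrUntagMask is folded into `pool_mask` by the callers above, so
// MTE-tagged pointers still compare equal to the base.
bool MaybeInPool(uintptr_t word, uintptr_t pool_base, uintptr_t pool_mask) {
  return (word & pool_mask) == pool_base;
}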


@@ -77,7 +77,7 @@ UserFaultFDWriteProtector::UserFaultFDWriteProtector()
   PA_CHECK(-1 != ioctl(uffd_, UFFDIO_API, &uffdio_api));
   PA_CHECK(UFFD_API == uffdio_api.api);

-  // Register the regular pool to listen uffd events.
+  // Register the giga-cage to listen uffd events.
   struct uffdio_register uffdio_register;
   uffdio_register.range.start = PartitionAddressSpace::RegularPoolBase();
   uffdio_register.range.len = kPoolMaxSize;


@@ -12,23 +12,20 @@ namespace base::android::features {
 // When the browser process has been in the background for several minutes at a
 // time, trigger an artificial critical memory pressure notification. This is
 // intended to reduce memory footprint.
-BASE_FEATURE(kBrowserProcessMemoryPurge,
-             "BrowserProcessMemoryPurge",
-             FEATURE_DISABLED_BY_DEFAULT);
+const base::Feature kBrowserProcessMemoryPurge{"BrowserProcessMemoryPurge",
+                                               FEATURE_DISABLED_BY_DEFAULT};

 // Crash the browser process if a child process is created which does not match
 // the browser process and the browser package appears to have changed since the
 // browser process was launched, so that the browser process will be started
 // fresh when next used, hopefully resolving the issue.
-BASE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged,
-             "CrashBrowserOnChildMismatchIfBrowserChanged",
-             FEATURE_DISABLED_BY_DEFAULT);
+const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged{
+    "CrashBrowserOnChildMismatchIfBrowserChanged", FEATURE_DISABLED_BY_DEFAULT};

 // Crash the browser process if a child process is created which does not match
 // the browser process regardless of whether the browser package appears to have
 // changed.
-BASE_FEATURE(kCrashBrowserOnAnyChildMismatch,
-             "CrashBrowserOnAnyChildMismatch",
-             FEATURE_DISABLED_BY_DEFAULT);
+const base::Feature kCrashBrowserOnAnyChildMismatch{
+    "CrashBrowserOnAnyChildMismatch", FEATURE_DISABLED_BY_DEFAULT};

 }  // namespace base::android::features
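The hunk above replaces the newer BASE_FEATURE() declaration macro with the older brace-initialized base::Feature constant. A hedged sketch of the two spellings for a hypothetical feature (kMyFeature is not part of the diff, and each spelling compiles only against the matching version of //base):

#include "base/feature_list.h"

// Left-hand column (newer base): declared via the macro.
BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);

// Right-hand column (older base): a plain constant with brace init.
const base::Feature kMyFeatureOld{"MyFeatureOld",
                                  base::FEATURE_DISABLED_BY_DEFAULT};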


@@ -13,9 +13,9 @@ namespace base::android::features {
 // alongside the definition of their values in the .cc file.

 // Alphabetical:
-BASE_DECLARE_FEATURE(kBrowserProcessMemoryPurge);
-BASE_DECLARE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged);
-BASE_DECLARE_FEATURE(kCrashBrowserOnAnyChildMismatch);
+extern const base::Feature kBrowserProcessMemoryPurge;
+extern const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged;
+extern const base::Feature kCrashBrowserOnAnyChildMismatch;

 }  // namespace base::android::features


@@ -35,8 +35,6 @@ enum SdkVersion {
   SDK_VERSION_Q = 29,
   SDK_VERSION_R = 30,
   SDK_VERSION_S = 31,
-  SDK_VERSION_Sv2 = 32,
-  SDK_VERSION_T = 33,
 };

 // BuildInfo is a singleton class that stores android build and device


@@ -35,9 +35,6 @@ class TrialLogger : public base::FieldTrialList::Observer {
   static void Log(const std::string& trial_name,
                   const std::string& group_name) {
-    // Changes to format of the log message below must be accompanied by
-    // changes to finch smoke tests since they look for this log message
-    // in the logcat.
     LOG(INFO) << "Active field trial \"" << trial_name
               << "\" in group \"" << group_name<< '"';
   }


@@ -160,9 +160,8 @@ JavaHandlerThread::State::State()
           sequence_manager::SequenceManager::Settings::Builder()
               .SetMessagePumpType(base::MessagePumpType::JAVA)
               .Build())),
-      default_task_queue(
-          sequence_manager->CreateTaskQueue(sequence_manager::TaskQueue::Spec(
-              sequence_manager::QueueName::DEFAULT_TQ))) {
+      default_task_queue(sequence_manager->CreateTaskQueue(
+          sequence_manager::TaskQueue::Spec("default_tq"))) {
   // TYPE_JAVA to get the Android java style message loop.
   std::unique_ptr<MessagePump> message_pump =
       MessagePump::Create(base::MessagePumpType::JAVA);


@@ -253,7 +253,7 @@ If a Java object "owns" a native one, store the pointer via
 the object. For example, have a `close()` that deletes the native object.

 The best way to pass "compound" types across in either direction is to
-create an inner class with PODs and a factory function. If possible, mark
+create an inner class with PODs and a factory function. If possible, make mark
 all the fields as "final".

 ## Build Rules


@@ -1553,18 +1553,18 @@ def GetScriptName():
   return os.sep.join(script_components[base_index:])


-def _RemoveStaleHeaders(path, output_names):
+def _RemoveStaleHeaders(path, output_files):
   if not os.path.isdir(path):
     return
   # Do not remove output files so that timestamps on declared outputs are not
   # modified unless their contents are changed (avoids reverse deps needing to
   # be rebuilt).
-  preserve = set(output_names)
+  preserve = set(output_files)
   for root, _, files in os.walk(path):
     for f in files:
-      if f not in preserve:
-        file_path = os.path.join(root, f)
-        if os.path.isfile(file_path) and file_path.endswith('.h'):
+      file_path = os.path.join(root, f)
+      if file_path not in preserve:
+        if os.path.isfile(file_path) and os.path.splitext(file_path)[1] == '.h':
           os.remove(file_path)
@@ -1591,21 +1588,18 @@ See SampleForTests.java for more details.
       help='Uses as a namespace in the generated header '
       'instead of the javap class name, or when there is '
       'no JNINamespace annotation in the java source.')
-  parser.add_argument('--input_file',
-                      action='append',
-                      required=True,
-                      dest='input_files',
-                      help='Input filenames, or paths within a .jar if '
-                      '--jar-file is used.')
-  parser.add_argument('--output_dir', required=True, help='Output directory.')
-  # TODO(agrieve): --prev_output_dir used only to make incremental builds work.
-  # Remove --prev_output_dir at some point after 2022.
-  parser.add_argument('--prev_output_dir',
-                      help='Delete headers found in this directory.')
-  parser.add_argument('--output_name',
-                      action='append',
-                      dest='output_names',
-                      help='Output filenames within output directory.')
+  parser.add_argument(
+      '--input_file',
+      action='append',
+      required=True,
+      dest='input_files',
+      help='Input file names, or paths within a .jar if '
+      '--jar-file is used.')
+  parser.add_argument(
+      '--output_file',
+      action='append',
+      dest='output_files',
+      help='Output file names.')
   parser.add_argument(
       '--script_name',
       default=GetScriptName(),
@@ -1654,28 +1651,22 @@ See SampleForTests.java for more details.
   parser.add_argument(
       '--split_name',
       help='Split name that the Java classes should be loaded from.')
-  # TODO(agrieve): --stamp used only to make incremental builds work.
-  # Remove --stamp at some point after 2022.
-  parser.add_argument('--stamp',
-                      help='Process --prev_output_dir and touch this file.')
   args = parser.parse_args()
   input_files = args.input_files
-  output_names = args.output_names
-
-  if args.prev_output_dir:
-    _RemoveStaleHeaders(args.prev_output_dir, [])
-
-  if args.stamp:
-    build_utils.Touch(args.stamp)
-    sys.exit(0)
-
-  if output_names:
+  output_files = args.output_files
+  if output_files:
+    output_dirs = set(os.path.dirname(f) for f in output_files)
+    if len(output_dirs) != 1:
+      parser.error(
+          'jni_generator only supports a single output directory per target '
+          '(got {})'.format(output_dirs))
+    output_dir = output_dirs.pop()
     # Remove existing headers so that moving .java source files but not updating
     # the corresponding C++ include will be a compile failure (otherwise
     # incremental builds will usually not catch this).
-    _RemoveStaleHeaders(args.output_dir, output_names)
+    _RemoveStaleHeaders(output_dir, output_files)
   else:
-    output_names = [None] * len(input_files)
+    output_files = [None] * len(input_files)
   temp_dir = tempfile.mkdtemp()
   try:
     if args.jar_file:
@@ -1683,11 +1674,7 @@ See SampleForTests.java for more details.
       z.extractall(temp_dir, input_files)
       input_files = [os.path.join(temp_dir, f) for f in input_files]

-    for java_path, header_name in zip(input_files, output_names):
-      if header_name:
-        header_path = os.path.join(args.output_dir, header_name)
-      else:
-        header_path = None
+    for java_path, header_path in zip(input_files, output_files):
       GenerateJNIHeader(java_path, header_path, args)
   finally:
     shutil.rmtree(temp_dir)


@@ -846,9 +846,7 @@ def _MakeProxySignature(proxy_native,
     signature_template = string.Template("""
     // Hashed name: ${ALT_NAME}""" + native_method_line)

-    # We add the prefix that is sometimes used so that codesearch can find it if
-    # someone searches a full method name from the stacktrace.
-    alt_name = f'Java_J_N_{proxy_native.hashed_proxy_name}'
+    alt_name = proxy_native.hashed_proxy_name
     proxy_name = proxy_native.proxy_name

   return signature_template.substitute({


@@ -70,6 +70,17 @@
   <fields>;
 }

+# Workaround for crbug/1002847. Methods of BaseGmsClient are incorrectly
+# removed even though they are required for the derived class GmsClient
+# to correctly implement Api$Client.
+# TODO: remove once crbug/1002847 resolved.
+-keep public class com.google.android.gms.common.internal.BaseGmsClient {
+  public void disconnect();
+  public void dump(java.lang.String,java.io.FileDescriptor,java.io.PrintWriter,java.lang.String[]);
+  public int getMinApkVersion();
+  public boolean requiresSignIn();
+}
+
 # Remove calls to String.format() where the result goes unused. This can mask
 # exceptions if the parameters to String.format() are invalid, but such cases
 # are generally programming bugs anyways.


@@ -5,6 +5,9 @@
 # Contains flags that can be safely shared with Cronet, and thus would be
 # appropriate for third-party apps to include.

+# Android support library annotations will get converted to androidx ones
+# which we want to keep.
+-keep @interface androidx.annotation.Keep
 -keep @androidx.annotation.Keep class *
 -keepclasseswithmembers,allowaccessmodification class * {
   @androidx.annotation.Keep <fields>;


@@ -11,35 +11,18 @@ namespace base {
 namespace android {

 namespace {
-RadioUtils::OverrideForTesting* g_overrider_for_tests = nullptr;
 bool InitializeIsSupported() {
   JNIEnv* env = AttachCurrentThread();
   return Java_RadioUtils_isSupported(env);
 }
 }  // namespace

-RadioUtils::OverrideForTesting::OverrideForTesting() {
-  DCHECK(!g_overrider_for_tests);
-  g_overrider_for_tests = this;
-}
-
-RadioUtils::OverrideForTesting::~OverrideForTesting() {
-  DCHECK(g_overrider_for_tests);
-  g_overrider_for_tests = nullptr;
-}
-
 bool RadioUtils::IsSupported() {
   static const bool kIsSupported = InitializeIsSupported();
   return kIsSupported;
 }

 RadioConnectionType RadioUtils::GetConnectionType() {
-  if (g_overrider_for_tests) {
-    // If GetConnectionType is being used in tests
-    return g_overrider_for_tests->GetConnectionType();
-  }
   if (!IsSupported())
     return RadioConnectionType::kUnknown;


@@ -39,20 +39,6 @@ enum class RadioConnectionType {
 class BASE_EXPORT RadioUtils {
  public:
-  class OverrideForTesting {
-   public:
-    OverrideForTesting();
-    ~OverrideForTesting();
-
-    void SetConnectionTypeForTesting(RadioConnectionType connection_type) {
-      connection_type_ = connection_type;
-    }
-
-    RadioConnectionType GetConnectionType() { return connection_type_; }
-
-   private:
-    RadioConnectionType connection_type_;
-  };
   static bool IsSupported();
   static RadioConnectionType GetConnectionType();
   static absl::optional<RadioSignalLevel> GetCellSignalLevel();
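The left-hand column's OverrideForTesting is an RAII hook that pins the connection type reported to callers; the right-hand column drops it. A sketch of how a test would use the hook on the side that still has it (kWifi is an assumed enumerator of RadioConnectionType, not shown in this hunk):

void ExampleRadioTest() {
  base::android::RadioUtils::OverrideForTesting radio_override;
  radio_override.SetConnectionTypeForTesting(
      base::android::RadioConnectionType::kWifi);  // assumed enumerator
  // While `radio_override` is in scope, RadioUtils::GetConnectionType()
  // returns the pinned value instead of querying Java.
}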


@@ -237,8 +237,6 @@ static void JNI_TraceEvent_InstantAndroidIPC(JNIEnv* env,
   });
 }

-#if BUILDFLAG(ENABLE_BASE_TRACING)
-
 static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
                                                  jint block_reason,
                                                  jint allow_reason,
@@ -264,16 +262,6 @@ static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
   });
 }

-#else  // BUILDFLAG(ENABLE_BASE_TRACING)
-
-// Empty implementations when TraceLog isn't available.
-static void JNI_TraceEvent_InstantAndroidToolbar(JNIEnv* env,
-                                                 jint block_reason,
-                                                 jint allow_reason,
-                                                 jint snapshot_diff) {}
-
-#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
-
 static void JNI_TraceEvent_Begin(JNIEnv* env,
                                  const JavaParamRef<jstring>& jname,
                                  const JavaParamRef<jstring>& jarg) {

src/base/as_const.h (new file, 24 lines)

@@ -0,0 +1,24 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AS_CONST_H_
+#define BASE_AS_CONST_H_
+
+#include <type_traits>
+
+namespace base {
+
+// C++14 implementation of C++17's std::as_const():
+// https://en.cppreference.com/w/cpp/utility/as_const
+template <typename T>
+constexpr std::add_const_t<T>& as_const(T& t) noexcept {
+  return t;
+}
+
+template <typename T>
+void as_const(const T&& t) = delete;
+
+}  // namespace base
+
+#endif  // BASE_AS_CONST_H_
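base/as_const.h backports C++17's std::as_const() to C++14. A brief, illustrative usage sketch:

#include <vector>

#include "base/as_const.h"

void Example() {
  std::vector<int> v{1, 2, 3};
  // Forces the const overload without a const_cast; `it` is a const_iterator,
  // so the elements cannot be mutated through it.
  auto it = base::as_const(v).begin();
  (void)it;
}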


@@ -6,32 +6,20 @@
 #include <stddef.h>

-#include "base/check.h"
-#include "base/numerics/checked_math.h"
 #include "third_party/modp_b64/modp_b64.h"

 namespace base {

 std::string Base64Encode(span<const uint8_t> input) {
   std::string output;
-  Base64EncodeAppend(input, &output);
-  return output;
-}
-
-void Base64EncodeAppend(span<const uint8_t> input, std::string* output) {
-  // Ensure `modp_b64_encode_len` will not overflow. Note this length and
-  // `modp_b64_encode`'s output includes a trailing NUL byte.
-  CHECK_LE(input.size(), MODP_B64_MAX_INPUT_LEN);
-  size_t encode_len = modp_b64_encode_len(input.size());
-  size_t prefix_len = output->size();
-  output->resize(base::CheckAdd(encode_len, prefix_len).ValueOrDie());
+  output.resize(modp_b64_encode_len(input.size()));  // makes room for null byte

   // modp_b64_encode_len() returns at least 1, so output[0] is safe to use.
   const size_t output_size = modp_b64_encode(
-      output->data() + prefix_len, reinterpret_cast<const char*>(input.data()),
-      input.size());
+      &(output[0]), reinterpret_cast<const char*>(input.data()), input.size());

-  // `output_size` does not include the trailing NUL byte, so this removes it.
-  output->resize(prefix_len + output_size);
+  output.resize(output_size);
+  return output;
 }

 void Base64Encode(StringPiece input, std::string* output) {


@@ -20,10 +20,6 @@ namespace base {
 // Encodes the input binary data in base64.
 BASE_EXPORT std::string Base64Encode(span<const uint8_t> input);

-// Encodes the input binary data in base64 and appends it to the output.
-BASE_EXPORT void Base64EncodeAppend(span<const uint8_t> input,
-                                    std::string* output);
-
 // Encodes the input string in base64.
 BASE_EXPORT void Base64Encode(StringPiece input, std::string* output);
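The left-hand column introduces Base64EncodeAppend() and rewrites the span overload of Base64Encode() on top of it. A hedged usage sketch of the two declarations above (byte values are illustrative):

#include <cstdint>
#include <string>
#include <vector>

#include "base/base64.h"

void Example() {
  const std::vector<uint8_t> bytes = {0xde, 0xad, 0xbe, 0xef};
  std::string encoded = base::Base64Encode(bytes);  // "3q2+7w=="
  // Left-hand column only: appends the encoding after the existing contents
  // instead of replacing them.
  base::Base64EncodeAppend(bytes, &encoded);
}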


@@ -1,12 +1,341 @@
-// Copyright 2022 The Chromium Authors
+// Copyright 2011 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-//
-// TODO(https://crbug.com/1364441): Temporary forwarding header.

 #ifndef BASE_BIND_H_
 #define BASE_BIND_H_

-#include "base/functional/bind.h"
+#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include "base/bind_internal.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
#include "base/mac/scoped_block.h"
#endif
// -----------------------------------------------------------------------------
// Usage documentation
// -----------------------------------------------------------------------------
//
// Overview:
// base::BindOnce() and base::BindRepeating() are helpers for creating
// base::OnceCallback and base::RepeatingCallback objects respectively.
//
// For a runnable object of n-arity, the base::Bind*() family allows partial
// application of the first m arguments. The remaining n - m arguments must be
// passed when invoking the callback with Run().
//
// // The first argument is bound at callback creation; the remaining
// // two must be passed when calling Run() on the callback object.
// base::OnceCallback<long(int, long)> cb = base::BindOnce(
// [](short x, int y, long z) { return x * y * z; }, 42);
//
// When binding to a method, the receiver object must also be specified at
// callback creation time. When Run() is invoked, the method will be invoked on
// the specified receiver object.
//
// class C : public base::RefCounted<C> { void F(); };
// auto instance = base::MakeRefCounted<C>();
// auto cb = base::BindOnce(&C::F, instance);
// std::move(cb).Run(); // Identical to instance->F()
//
// See //docs/callback.md for the full documentation.
//
// -----------------------------------------------------------------------------
// Implementation notes
// -----------------------------------------------------------------------------
//
// If you're reading the implementation, before proceeding further, you should
// read the top comment of base/bind_internal.h for a definition of common
// terms and concepts.
namespace base {
// Bind as OnceCallback.
template <typename Functor, typename... Args>
inline OnceCallback<internal::MakeUnboundRunType<Functor, Args...>> BindOnce(
Functor&& functor,
Args&&... args) {
static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
(std::is_rvalue_reference<Functor&&>() &&
!std::is_const<std::remove_reference_t<Functor>>()),
"BindOnce requires non-const rvalue for OnceCallback binding."
" I.e.: base::BindOnce(std::move(callback)).");
static_assert(
std::conjunction<
internal::AssertBindArgIsNotBasePassed<std::decay_t<Args>>...>::value,
"Use std::move() instead of base::Passed() with base::BindOnce()");
return internal::BindImpl<OnceCallback>(std::forward<Functor>(functor),
std::forward<Args>(args)...);
}
// Bind as RepeatingCallback.
template <typename Functor, typename... Args>
inline RepeatingCallback<internal::MakeUnboundRunType<Functor, Args...>>
BindRepeating(Functor&& functor, Args&&... args) {
static_assert(
!internal::IsOnceCallback<std::decay_t<Functor>>(),
"BindRepeating cannot bind OnceCallback. Use BindOnce with std::move().");
return internal::BindImpl<RepeatingCallback>(std::forward<Functor>(functor),
std::forward<Args>(args)...);
}
// Overloads to allow nicer compile errors when attempting to pass the address
// an overloaded function to `BindOnce()` or `BindRepeating()`. Otherwise, clang
// provides only the error message "no matching function [...] candidate
// template ignored: couldn't infer template argument 'Functor'", with no
// reference to the fact that `&` is being used on an overloaded function.
//
// These overloads to provide better error messages will never be selected
// unless template type deduction fails because of how overload resolution
// works; per [over.ics.rank/2.2]:
//
// When comparing the basic forms of implicit conversion sequences (as defined
// in [over.best.ics])
// - a standard conversion sequence is a better conversion sequence than a
// user-defined conversion sequence or an ellipsis conversion sequence, and
// - a user-defined conversion sequence is a better conversion sequence than
// an ellipsis conversion sequence.
//
// So these overloads will only be selected as a last resort iff template type
// deduction fails.
//
// These overloads also intentionally do not return `void`, as this prevents
// clang from emitting spurious errors such as "variable has incomplete type
// 'void'" when assigning the result of `BindOnce()`/`BindRepeating()` to a
// variable with type `auto` or `decltype(auto)`.
struct BindFailedCheckPreviousErrors {};
BindFailedCheckPreviousErrors BindOnce(...);
BindFailedCheckPreviousErrors BindRepeating(...);
// Unretained() allows binding a non-refcounted class, and to disable
// refcounting on arguments that are refcounted objects.
//
// EXAMPLE OF Unretained():
//
// class Foo {
// public:
// void func() { cout << "Foo:f" << endl; }
// };
//
// // In some function somewhere.
// Foo foo;
// OnceClosure foo_callback =
// BindOnce(&Foo::func, Unretained(&foo));
// std::move(foo_callback).Run(); // Prints "Foo:f".
//
// Without the Unretained() wrapper on |&foo|, the above call would fail
// to compile because Foo does not support the AddRef() and Release() methods.
template <typename T>
inline internal::UnretainedWrapper<T> Unretained(T* o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(const raw_ptr<T, I>& o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(raw_ptr<T, I>&& o) {
return internal::UnretainedWrapper<T>(std::move(o));
}
template <typename T, typename I>
inline auto Unretained(const raw_ref<T, I>& o) {
return internal::UnretainedRefWrapper(o);
}
template <typename T, typename I>
inline auto Unretained(raw_ref<T, I>&& o) {
return internal::UnretainedRefWrapper(std::move(o));
}
// RetainedRef() accepts a ref counted object and retains a reference to it.
// When the callback is called, the object is passed as a raw pointer.
//
// EXAMPLE OF RetainedRef():
//
// void foo(RefCountedBytes* bytes) {}
//
// scoped_refptr<RefCountedBytes> bytes = ...;
// OnceClosure callback = BindOnce(&foo, base::RetainedRef(bytes));
// std::move(callback).Run();
//
// Without RetainedRef, the scoped_refptr would try to implicitly convert to
// a raw pointer and fail compilation:
//
// OnceClosure callback = BindOnce(&foo, bytes); // ERROR!
template <typename T>
inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
return internal::RetainedRefWrapper<T>(o);
}
template <typename T>
inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
return internal::RetainedRefWrapper<T>(std::move(o));
}
// Owned() transfers ownership of an object to the callback resulting from
// bind; the object will be deleted when the callback is deleted.
//
// EXAMPLE OF Owned():
//
// void foo(int* arg) { cout << *arg << endl }
//
// int* pn = new int(1);
// RepeatingClosure foo_callback = BindRepeating(&foo, Owned(pn));
//
// foo_callback.Run(); // Prints "1"
// foo_callback.Run(); // Prints "1"
// *pn = 2;
// foo_callback.Run(); // Prints "2"
//
// foo_callback.Reset(); // |pn| is deleted. Also will happen when
// // |foo_callback| goes out of scope.
//
// Without Owned(), someone would have to know to delete |pn| when the last
// reference to the callback is deleted.
template <typename T>
inline internal::OwnedWrapper<T> Owned(T* o) {
return internal::OwnedWrapper<T>(o);
}
template <typename T, typename Deleter>
inline internal::OwnedWrapper<T, Deleter> Owned(
std::unique_ptr<T, Deleter>&& ptr) {
return internal::OwnedWrapper<T, Deleter>(std::move(ptr));
}
// OwnedRef() stores an object in the callback resulting from
// bind and passes a reference to the object to the bound function.
//
// EXAMPLE OF OwnedRef():
//
// void foo(int& arg) { cout << ++arg << endl }
//
// int counter = 0;
// RepeatingClosure foo_callback = BindRepeating(&foo, OwnedRef(counter));
//
// foo_callback.Run(); // Prints "1"
// foo_callback.Run(); // Prints "2"
// foo_callback.Run(); // Prints "3"
//
// cout << counter; // Prints "0", OwnedRef creates a copy of counter.
//
// Supports OnceCallbacks as well, useful to pass placeholder arguments:
//
// void bar(int& ignore, const std::string& s) { cout << s << endl }
//
// OnceClosure bar_callback = BindOnce(&bar, OwnedRef(0), "Hello");
//
// std::move(bar_callback).Run(); // Prints "Hello"
//
// Without OwnedRef() it would not be possible to pass a mutable reference to an
// object owned by the callback.
template <typename T>
internal::OwnedRefWrapper<std::decay_t<T>> OwnedRef(T&& t) {
return internal::OwnedRefWrapper<std::decay_t<T>>(std::forward<T>(t));
}
// Passed() is for transferring movable-but-not-copyable types (eg. unique_ptr)
// through a RepeatingCallback. Logically, this signifies a destructive transfer
// of the state of the argument into the target function. Invoking
// RepeatingCallback::Run() twice on a callback that was created with a Passed()
// argument will CHECK() because the first invocation would have already
// transferred ownership to the target function.
//
// Note that Passed() is not necessary with BindOnce(), as std::move() does the
// same thing. Avoid Passed() in favor of std::move() with BindOnce().
//
// EXAMPLE OF Passed():
//
// void TakesOwnership(std::unique_ptr<Foo> arg) { }
// std::unique_ptr<Foo> CreateFoo() { return std::make_unique<Foo>();
// }
//
// auto f = std::make_unique<Foo>();
//
// // |cb| is given ownership of Foo(). |f| is now NULL.
// // You can use std::move(f) in place of &f, but it's more verbose.
// RepeatingClosure cb = BindRepeating(&TakesOwnership, Passed(&f));
//
// // Run was never called so |cb| still owns Foo() and deletes
// // it on Reset().
// cb.Reset();
//
// // |cb| is given a new Foo created by CreateFoo().
// cb = BindRepeating(&TakesOwnership, Passed(CreateFoo()));
//
// // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
// // no longer owns Foo() and, if reset, would not delete Foo().
// cb.Run(); // Foo() is now transferred to |arg| and deleted.
// cb.Run(); // This CHECK()s since Foo() already been used once.
//
// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and is
// best suited for use with the return value of a function or other temporary
// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
// to avoid having to write Passed(std::move(scoper)).
//
// Both versions of Passed() prevent T from being an lvalue reference. The first
// via use of enable_if, and the second takes a T* which will not bind to T&.
template <typename T,
std::enable_if_t<!std::is_lvalue_reference_v<T>>* = nullptr>
inline internal::PassedWrapper<T> Passed(T&& scoper) {
return internal::PassedWrapper<T>(std::move(scoper));
}
template <typename T>
inline internal::PassedWrapper<T> Passed(T* scoper) {
return internal::PassedWrapper<T>(std::move(*scoper));
}
// IgnoreResult() is used to adapt a function or callback with a return type to
// one with a void return. This is most useful if you have a function with,
// say, a pesky ignorable bool return that you want to use with PostTask or
// something else that expect a callback with a void return.
//
// EXAMPLE OF IgnoreResult():
//
// int DoSomething(int arg) { cout << arg << endl; }
//
// // Assign to a callback with a void return type.
// OnceCallback<void(int)> cb = BindOnce(IgnoreResult(&DoSomething));
// std::move(cb).Run(1); // Prints "1".
//
// // Prints "2" on |ml|.
// ml->PostTask(FROM_HERE, BindOnce(IgnoreResult(&DoSomething), 2);
template <typename T>
inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
return internal::IgnoreResultHelper<T>(std::move(data));
}
#if BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
// RetainBlock() is used to adapt an Objective-C block when Automated Reference
// Counting (ARC) is disabled. This is unnecessary when ARC is enabled, as the
// BindOnce and BindRepeating already support blocks then.
//
// EXAMPLE OF RetainBlock():
//
// // Wrap the block and bind it to a callback.
// OnceCallback<void(int)> cb =
// BindOnce(RetainBlock(^(int n) { NSLog(@"%d", n); }));
// std::move(cb).Run(1); // Logs "1".
template <typename R, typename... Args>
base::mac::ScopedBlock<R (^)(Args...)> RetainBlock(R (^block)(Args...)) {
return base::mac::ScopedBlock<R (^)(Args...)>(block,
base::scoped_policy::RETAIN);
}
#endif // BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
} // namespace base
 #endif  // BASE_BIND_H_


@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#ifndef BASE_FUNCTIONAL_BIND_INTERNAL_H_
-#define BASE_FUNCTIONAL_BIND_INTERNAL_H_
+#ifndef BASE_BIND_INTERNAL_H_
+#define BASE_BIND_INTERNAL_H_

 #include <stddef.h>
@@ -15,9 +15,9 @@
 #include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/partition_alloc_config.h"
+#include "base/callback_internal.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
-#include "base/functional/callback_internal.h"
 #include "base/memory/raw_ptr.h"
 #include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
 #include "base/memory/raw_ptr_asan_service.h"
@@ -88,7 +88,7 @@ namespace internal {
 template <typename Functor, typename SFINAE = void>
 struct FunctorTraits;

-template <typename T, typename RawPtrType = base::RawPtrBanDanglingIfSupported>
+template <typename T>
 class UnretainedWrapper {
  public:
   explicit UnretainedWrapper(T* o) : ptr_(o) {}
@@ -103,25 +103,10 @@ class UnretainedWrapper {
   template <typename U = T, typename I>
   explicit UnretainedWrapper(raw_ptr<U, I>&& o) : ptr_(std::move(o)) {}

-  template <typename U, typename I>
-  static void ReportIfDangling(const raw_ptr<U, I>& ptr) {
-    if constexpr (std::is_same_v<RawPtrType,
-                                 base::RawPtrBanDanglingIfSupported>) {
-      ptr.ReportIfDangling();
-    }
-  }
-  template <typename U>
-  static void ReportIfDangling(U* ptr) {}
-
-  T* get() const {
-    // `ptr_` is either a `raw_ptr` (if `T` is a supported type) or a regular
-    // C++ pointer otherwise.
-    ReportIfDangling(ptr_);
-    return ptr_;
-  }
+  T* get() const { return ptr_; }

  private:
-#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
   // When `MTECheckedPtr` is enabled as the backing implementation of
   // `raw_ptr`, there are too many different types that immediately
   // cause Chrome to crash. Some of these are inutterable as forward
@@ -133,18 +118,10 @@ class UnretainedWrapper {
   // than `raw_ptr`) when `raw_ptr` is `MTECheckedPtr`.
   using ImplType = T*;
 #else
-  // `Unretained()` arguments often dangle by design (common design patterns
-  // consists of managing objects lifetime inside the callbacks themselves using
-  // stateful information), so disable direct dangling pointer detection of
-  // `ptr_`.
-  //
-  // If the callback is invoked, dangling pointer detection will be triggered
-  // before invoking the bound functor (unless stated other wise, see
-  // `UnsafeDangling()`), when retrieving the pointer value via `get()` above.
   using ImplType = std::conditional_t<raw_ptr_traits::IsSupportedType<T>::value,
-                                      raw_ptr<T, DisableDanglingPtrDetection>,
+                                      raw_ptr<T, DanglingUntriaged>,
                                       T*>;
-#endif  // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
   ImplType ptr_;
 };
@@ -169,7 +146,7 @@ class UnretainedRefWrapper {
   T& ref_;
 };

-#if !defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
+#if !defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
 // Implementation of UnretainedRefWrapper for `T` where raw_ref<T> is supported.
 template <typename T>
 class UnretainedRefWrapper<T, true> {
@@ -813,41 +790,36 @@ using MakeStorageType = typename StorageTraits<std::decay_t<T>>::Type;
 //
 // WeakCalls need special syntax that is applied to the first argument to check
 // if they should no-op themselves.
-template <bool is_weak_call, typename ReturnType, size_t... indices>
+template <bool is_weak_call, typename ReturnType>
 struct InvokeHelper;

-template <typename ReturnType, size_t... indices>
-struct InvokeHelper<false, ReturnType, indices...> {
-  template <typename Functor, typename BoundArgsTuple, typename... RunArgs>
-  static inline ReturnType MakeItSo(Functor&& functor,
-                                    BoundArgsTuple&& bound,
-                                    RunArgs&&... args) {
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+  template <typename Functor, typename... RunArgs>
+  static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
     using Traits = MakeFunctorTraits<Functor>;
-    return Traits::Invoke(
-        std::forward<Functor>(functor),
-        Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
-        std::forward<RunArgs>(args)...);
+    return Traits::Invoke(std::forward<Functor>(functor),
+                          std::forward<RunArgs>(args)...);
   }
 };

-template <typename ReturnType, size_t... indices>
-struct InvokeHelper<true, ReturnType, indices...> {
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
   // WeakCalls are only supported for functions with a void return type.
   // Otherwise, the function result would be undefined if the WeakPtr<>
   // is invalidated.
   static_assert(std::is_void_v<ReturnType>,
                 "weak_ptrs can only bind to methods without return values");

-  template <typename Functor, typename BoundArgsTuple, typename... RunArgs>
+  template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
   static inline void MakeItSo(Functor&& functor,
-                              BoundArgsTuple&& bound,
+                              BoundWeakPtr&& weak_ptr,
                               RunArgs&&... args) {
-    if (!std::get<0>(bound))
+    if (!weak_ptr)
       return;
     using Traits = MakeFunctorTraits<Functor>;
-    Traits::Invoke(
-        std::forward<Functor>(functor),
-        Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
-        std::forward<RunArgs>(args)...);
+    Traits::Invoke(std::forward<Functor>(functor),
+                   std::forward<BoundWeakPtr>(weak_ptr),
+                   std::forward<RunArgs>(args)...);
   }
 };
@@ -890,7 +862,7 @@ struct Invoker<StorageType, R(UnboundArgs...)> {
   template <typename Functor, typename BoundArgsTuple, size_t... indices>
   static inline R RunImpl(Functor&& functor,
                           BoundArgsTuple&& bound,
-                          std::index_sequence<indices...> seq,
+                          std::index_sequence<indices...>,
                           UnboundArgs&&... unbound_args) {
     static constexpr bool is_method = MakeFunctorTraits<Functor>::is_method;
@@ -907,18 +879,9 @@ struct Invoker<StorageType, R(UnboundArgs...)> {
         IsWeakMethod<is_method,
                      std::tuple_element_t<indices, DecayedArgsTuple>...>();

-    // Do not `Unwrap()` here, as that immediately triggers dangling pointer
-    // detection. Dangling pointer detection should only be triggered if the
-    // callback is not cancelled, but cancellation status is not determined
-    // until later inside the InvokeHelper::MakeItSo specialization for weak
-    // calls.
-    //
-    // Dangling pointers when invoking a cancelled callback are not considered
-    // a memory safety error because protecting raw pointers usage with weak
-    // receivers (where the weak receiver usually own the pointed objects) is a
-    // common and broadly used pattern in the codebase.
-    return InvokeHelper<is_weak_call, R, indices...>::MakeItSo(
-        std::forward<Functor>(functor), std::forward<BoundArgsTuple>(bound),
+    return InvokeHelper<is_weak_call, R>::MakeItSo(
+        std::forward<Functor>(functor),
+        Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
         std::forward<UnboundArgs>(unbound_args)...);
   }
 };
@@ -1529,11 +1492,9 @@ struct BindUnwrapTraits {
   }
 };

-template <typename T, typename ImplType>
-struct BindUnwrapTraits<internal::UnretainedWrapper<T, ImplType>> {
-  static T* Unwrap(const internal::UnretainedWrapper<T, ImplType>& o) {
-    return o.get();
-  }
+template <typename T>
+struct BindUnwrapTraits<internal::UnretainedWrapper<T>> {
+  static T* Unwrap(const internal::UnretainedWrapper<T>& o) { return o.get(); }
 };

 template <typename T>
@@ -1641,4 +1602,4 @@ struct CallbackCancellationTraits<RepeatingCallback<Signature>,

 }  // namespace base

-#endif  // BASE_FUNCTIONAL_BIND_INTERNAL_H_
+#endif  // BASE_BIND_INTERNAL_H_
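The InvokeHelper<true, ReturnType> specialization above is the cancellation point for weak-receiver callbacks: MakeItSo() returns early when the bound WeakPtr has been invalidated, so the functor never runs. A sketch of the observable behavior (Worker and Demo are illustrative, not from the diff):

#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/callback.h"
#include "base/memory/weak_ptr.h"

class Worker {
 public:
  void Ping() {}
  base::WeakPtr<Worker> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }

 private:
  base::WeakPtrFactory<Worker> weak_factory_{this};
};

void Demo() {
  auto worker = std::make_unique<Worker>();
  base::OnceClosure cb = base::BindOnce(&Worker::Ping, worker->AsWeakPtr());
  worker.reset();       // Invalidates every WeakPtr minted by the factory.
  std::move(cb).Run();  // No-op: the weak-call helper sees a null WeakPtr.
}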


@@ -1,12 +1,337 @@
-// Copyright 2022 The Chromium Authors
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
-// TODO(https://crbug.com/1364441): Temporary forwarding header.
+// NOTE: Header files that do not require the full definition of
+// base::{Once,Repeating}Callback or base::{Once,Repeating}Closure should
+// #include "base/callback_forward.h" instead of this file.

 #ifndef BASE_CALLBACK_H_
 #define BASE_CALLBACK_H_

-#include "base/functional/callback.h"
+#include <stddef.h>
#include <utility>
#include "base/bind.h"
#include "base/callback_forward.h" // IWYU pragma: export
#include "base/callback_internal.h"
#include "base/check.h"
#include "base/functional/function_ref.h"
#include "base/notreached.h"
#include "base/types/always_false.h"
// -----------------------------------------------------------------------------
// Usage documentation
// -----------------------------------------------------------------------------
//
// Overview:
// A callback is similar in concept to a function pointer: it wraps a runnable
// object such as a function, method, lambda, or even another callback, allowing
// the runnable object to be invoked later via the callback object.
//
// Unlike function pointers, callbacks are created with base::BindOnce() or
// base::BindRepeating() and support partial function application.
//
// A base::OnceCallback may be Run() at most once; a base::RepeatingCallback may
// be Run() any number of times. |is_null()| is guaranteed to return true for a
// moved-from callback.
//
// // The lambda takes two arguments, but the first argument |x| is bound at
// // callback creation.
// base::OnceCallback<int(int)> cb = base::BindOnce([] (int x, int y) {
// return x + y;
// }, 1);
// // Run() only needs the remaining unbound argument |y|.
// printf("1 + 2 = %d\n", std::move(cb).Run(2)); // Prints 3
// printf("cb is null? %s\n",
// cb.is_null() ? "true" : "false"); // Prints true
// std::move(cb).Run(2); // Crashes since |cb| has already run.
//
// Callbacks also support cancellation. A common use is binding the receiver
// object as a WeakPtr<T>. If that weak pointer is invalidated, calling Run()
// will be a no-op. Note that |IsCancelled()| and |is_null()| are distinct:
// simply cancelling a callback will not also make it null.
//
// See //docs/callback.md for the full documentation.
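The WeakPtr-based cancellation described above looks roughly like this in practice (a sketch; Controller and its members are illustrative):

#include "base/bind.h"
#include "base/callback.h"
#include "base/memory/weak_ptr.h"

class Controller {
 public:
  base::RepeatingCallback<void(int)> MakeUpdateCallback() {
    // Binding through a WeakPtr ties the callback's validity to the
    // Controller's lifetime.
    return base::BindRepeating(&Controller::Update,
                               weak_factory_.GetWeakPtr());
  }

 private:
  void Update(int value) {}

  base::WeakPtrFactory<Controller> weak_factory_{this};
};

// Once the Controller is destroyed, Run() on the returned callback is a
// no-op: the callback is cancelled, but is_null() still returns false.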
namespace base {
namespace internal {
struct NullCallbackTag {
template <typename Signature>
struct WithSignature {};
};
struct DoNothingCallbackTag {
template <typename Signature>
struct WithSignature {};
};
} // namespace internal
template <typename R, typename... Args>
class OnceCallback<R(Args...)> : public internal::CallbackBase {
public:
using ResultType = R;
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingType<Args>...);
constexpr OnceCallback() = default;
OnceCallback(std::nullptr_t) = delete;
constexpr OnceCallback(internal::NullCallbackTag) : OnceCallback() {}
constexpr OnceCallback& operator=(internal::NullCallbackTag) {
*this = OnceCallback();
return *this;
}
constexpr OnceCallback(internal::NullCallbackTag::WithSignature<RunType>)
: OnceCallback(internal::NullCallbackTag()) {}
constexpr OnceCallback& operator=(
internal::NullCallbackTag::WithSignature<RunType>) {
*this = internal::NullCallbackTag();
return *this;
}
constexpr OnceCallback(internal::DoNothingCallbackTag)
: OnceCallback(BindOnce([](Args... args) {})) {}
constexpr OnceCallback& operator=(internal::DoNothingCallbackTag) {
*this = BindOnce([](Args... args) {});
return *this;
}
constexpr OnceCallback(internal::DoNothingCallbackTag::WithSignature<RunType>)
: OnceCallback(internal::DoNothingCallbackTag()) {}
constexpr OnceCallback& operator=(
internal::DoNothingCallbackTag::WithSignature<RunType>) {
*this = internal::DoNothingCallbackTag();
return *this;
}
explicit OnceCallback(internal::BindStateBase* bind_state)
: internal::CallbackBase(bind_state) {}
OnceCallback(const OnceCallback&) = delete;
OnceCallback& operator=(const OnceCallback&) = delete;
OnceCallback(OnceCallback&&) noexcept = default;
OnceCallback& operator=(OnceCallback&&) noexcept = default;
OnceCallback(RepeatingCallback<RunType> other)
: internal::CallbackBase(std::move(other)) {}
OnceCallback& operator=(RepeatingCallback<RunType> other) {
static_cast<internal::CallbackBase&>(*this) = std::move(other);
return *this;
}
R Run(Args... args) const & {
static_assert(!sizeof(*this),
"OnceCallback::Run() may only be invoked on a non-const "
"rvalue, i.e. std::move(callback).Run().");
NOTREACHED();
}
R Run(Args... args) && {
// Move the callback instance into a local variable before the invocation;
// this ensures the internal state is cleared after the invocation.
// It's not safe to touch |this| after the invocation, since running the
// bound function may destroy |this|.
OnceCallback cb = std::move(*this);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
return f(cb.bind_state_.get(), std::forward<Args>(args)...);
}
// Then() returns a new OnceCallback that receives the same arguments as
// |this|, and with the return type of |then|. The returned callback will:
// 1) Run the functor currently bound to |this| callback.
// 2) Run the |then| callback with the result from step 1 as its single
// argument.
// 3) Return the value from running the |then| callback.
//
// Since this method generates a callback that is a replacement for `this`,
// `this` will be consumed and reset to a null callback to ensure the
// originally-bound functor can be run at most once.
template <typename ThenR, typename... ThenArgs>
OnceCallback<ThenR(Args...)> Then(OnceCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindOnce(
internal::ThenHelper<
OnceCallback, OnceCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
// This overload is required; even though RepeatingCallback is implicitly
// convertible to OnceCallback, that conversion will not be used when matching
// for template argument deduction.
template <typename ThenR, typename... ThenArgs>
OnceCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindOnce(
internal::ThenHelper<
OnceCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::OnceCallback to base::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindOnce() is not necessary with base::FunctionRef; is it "
"possible to use a capturing lambda directly? If not, please bring up "
"this use case on #cxx (Slack) or cxx@chromium.org.");
}
};
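A minimal sketch of the Then() semantics documented above (AddOne and Print are illustrative):

#include <cstdio>
#include <utility>
#include "base/bind.h"
#include "base/callback.h"

int AddOne(int x) { return x + 1; }
void Print(int v) { std::printf("%d\n", v); }

void ThenDemo() {
  base::OnceCallback<int(int)> step = base::BindOnce(&AddOne);
  // |chained| takes |step|'s arguments and has |Print|'s (void) return type;
  // |step| itself is consumed and reset to null.
  base::OnceCallback<void(int)> chained =
      std::move(step).Then(base::BindOnce(&Print));
  std::move(chained).Run(41);  // Runs AddOne(41), then Print(42).
}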
template <typename R, typename... Args>
class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
public:
using ResultType = R;
using RunType = R(Args...);
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingType<Args>...);
constexpr RepeatingCallback() = default;
RepeatingCallback(std::nullptr_t) = delete;
constexpr RepeatingCallback(internal::NullCallbackTag)
: RepeatingCallback() {}
constexpr RepeatingCallback& operator=(internal::NullCallbackTag) {
*this = RepeatingCallback();
return *this;
}
constexpr RepeatingCallback(internal::NullCallbackTag::WithSignature<RunType>)
: RepeatingCallback(internal::NullCallbackTag()) {}
constexpr RepeatingCallback& operator=(
internal::NullCallbackTag::WithSignature<RunType>) {
*this = internal::NullCallbackTag();
return *this;
}
constexpr RepeatingCallback(internal::DoNothingCallbackTag)
: RepeatingCallback(BindRepeating([](Args... args) {})) {}
constexpr RepeatingCallback& operator=(internal::DoNothingCallbackTag) {
*this = BindRepeating([](Args... args) {});
return *this;
}
constexpr RepeatingCallback(
internal::DoNothingCallbackTag::WithSignature<RunType>)
: RepeatingCallback(internal::DoNothingCallbackTag()) {}
constexpr RepeatingCallback& operator=(
internal::DoNothingCallbackTag::WithSignature<RunType>) {
*this = internal::DoNothingCallbackTag();
return *this;
}
explicit RepeatingCallback(internal::BindStateBase* bind_state)
: internal::CallbackBaseCopyable(bind_state) {}
// Copyable and movable.
RepeatingCallback(const RepeatingCallback&) = default;
RepeatingCallback& operator=(const RepeatingCallback&) = default;
RepeatingCallback(RepeatingCallback&&) noexcept = default;
RepeatingCallback& operator=(RepeatingCallback&&) noexcept = default;
bool operator==(const RepeatingCallback& other) const {
return EqualsInternal(other);
}
bool operator!=(const RepeatingCallback& other) const {
return !operator==(other);
}
R Run(Args... args) const & {
// Keep `bind_state_` alive at least until after the invocation to ensure
// all bound `Unretained` arguments remain protected by MiraclePtr.
auto bind_state_protector = this->bind_state_;
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
return f(this->bind_state_.get(), std::forward<Args>(args)...);
}
R Run(Args... args) && {
// Move the callback instance into a local variable before the invocation;
// this ensures the internal state is cleared after the invocation.
// It's not safe to touch |this| after the invocation, since running the
// bound function may destroy |this|.
RepeatingCallback cb = std::move(*this);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
return f(std::move(cb).bind_state_.get(), std::forward<Args>(args)...);
}
// Then() returns a new RepeatingCallback that receives the same arguments as
// |this|, and with the return type of |then|. The
// returned callback will:
// 1) Run the functor currently bound to |this| callback.
// 2) Run the |then| callback with the result from step 1 as its single
// argument.
// 3) Return the value from running the |then| callback.
//
// If called on an rvalue (e.g. std::move(cb).Then(...)), this method
// generates a callback that is a replacement for `this`. Therefore, `this`
// will be consumed and reset to a null callback to ensure the
// originally-bound functor will be run at most once.
template <typename ThenR, typename... ThenArgs>
RepeatingCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) const& {
CHECK(then);
return BindRepeating(
internal::ThenHelper<
RepeatingCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
*this, std::move(then));
}
template <typename ThenR, typename... ThenArgs>
RepeatingCallback<ThenR(Args...)> Then(
RepeatingCallback<ThenR(ThenArgs...)> then) && {
CHECK(then);
return BindRepeating(
internal::ThenHelper<
RepeatingCallback,
RepeatingCallback<ThenR(ThenArgs...)>>::CreateTrampoline(),
std::move(*this), std::move(then));
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::RepeatingCallback to base::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindRepeating() is not necessary with base::FunctionRef; "
"is it possible to use a capturing lambda directly? If not, please "
"bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
};
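For contrast with OnceCallback, a short sketch of RepeatingCallback's copy-and-run-many-times behavior (names illustrative):

#include "base/bind.h"
#include "base/callback.h"

void RepeatingDemo() {
  int sum = 0;
  base::RepeatingCallback<void(int)> add = base::BindRepeating(
      [](int* total, int x) { *total += x; }, base::Unretained(&sum));
  add.Run(1);
  add.Run(2);
  // Copyable, unlike OnceCallback; both copies share the same bound state.
  base::RepeatingCallback<void(int)> copy = add;
  copy.Run(3);  // |sum| is now 6.
}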
} // namespace base
 #endif  // BASE_CALLBACK_H_


@@ -1,12 +1,24 @@
-// Copyright 2022 The Chromium Authors
+// Copyright 2011 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-//
-// TODO(https://crbug.com/1364441): Temporary forwarding header.
 #ifndef BASE_CALLBACK_FORWARD_H_
 #define BASE_CALLBACK_FORWARD_H_
-#include "base/functional/callback_forward.h"
+namespace base {
+template <typename Signature>
+class OnceCallback;
+template <typename Signature>
+class RepeatingCallback;
+// Syntactic sugar to make OnceClosure<void()> and RepeatingClosure<void()>
+// easier to declare since they will be used in a lot of APIs with delayed
+// execution.
+using OnceClosure = OnceCallback<void()>;
+using RepeatingClosure = RepeatingCallback<void()>;
+}  // namespace base
 #endif  // BASE_CALLBACK_FORWARD_H_
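A sketch of when the forward header is enough (TaskQueue is illustrative): a header that only passes callbacks through needs the declarations, not the definitions.

#include "base/callback_forward.h"

class TaskQueue {
 public:
  // Only the declaration of base::OnceClosure is needed here; the body in
  // the .cc file includes the full callback header.
  void PostTask(base::OnceClosure task);
};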


@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#include "base/functional/callback_helpers.h"
+#include "base/callback_helpers.h"
 namespace base {


@@ -1,12 +1,191 @@
-// Copyright 2022 The Chromium Authors
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-//
-// TODO(https://crbug.com/1364441): Temporary forwarding header.
+// This defines helpful methods for dealing with Callbacks. Because Callbacks
+// are implemented using templates, with a class per callback signature, adding
+// methods to Callback<> itself is unattractive (lots of extra code gets
+// generated). Instead, consider adding methods here.
 #ifndef BASE_CALLBACK_HELPERS_H_
 #define BASE_CALLBACK_HELPERS_H_
-#include "base/functional/callback_helpers.h"
+#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"
namespace base {
namespace internal {
template <typename T>
struct IsBaseCallbackImpl : std::false_type {};
template <typename R, typename... Args>
struct IsBaseCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {};
template <typename R, typename... Args>
struct IsBaseCallbackImpl<RepeatingCallback<R(Args...)>> : std::true_type {};
template <typename T>
struct IsOnceCallbackImpl : std::false_type {};
template <typename R, typename... Args>
struct IsOnceCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {};
} // namespace internal
// IsBaseCallback<T>::value is true when T is any of the Closure or Callback
// family of types.
template <typename T>
using IsBaseCallback = internal::IsBaseCallbackImpl<std::decay_t<T>>;
// IsOnceCallback<T>::value is true when T is a OnceClosure or OnceCallback
// type.
template <typename T>
using IsOnceCallback = internal::IsOnceCallbackImpl<std::decay_t<T>>;
// SFINAE-friendly enabler that allows overloading methods for both Repeating
// and OnceCallbacks.
//
// Usage:
// template <template <typename> class CallbackType,
// ... other template args ...,
// typename = EnableIfIsBaseCallback<CallbackType>>
// void DoStuff(CallbackType<...> cb, ...);
template <template <typename> class CallbackType>
using EnableIfIsBaseCallback =
std::enable_if_t<IsBaseCallback<CallbackType<void()>>::value>;
namespace internal {
template <typename... Args>
class OnceCallbackHolder final {
public:
OnceCallbackHolder(OnceCallback<void(Args...)> callback,
bool ignore_extra_runs)
: callback_(std::move(callback)), ignore_extra_runs_(ignore_extra_runs) {
DCHECK(callback_);
}
OnceCallbackHolder(const OnceCallbackHolder&) = delete;
OnceCallbackHolder& operator=(const OnceCallbackHolder&) = delete;
void Run(Args... args) {
if (subtle::NoBarrier_AtomicExchange(&has_run_, 1)) {
CHECK(ignore_extra_runs_) << "Both OnceCallbacks returned by "
"base::SplitOnceCallback() were run. "
"At most one of the pair should be run.";
return;
}
DCHECK(callback_);
std::move(callback_).Run(std::forward<Args>(args)...);
}
private:
volatile subtle::Atomic32 has_run_ = 0;
base::OnceCallback<void(Args...)> callback_;
const bool ignore_extra_runs_;
};
} // namespace internal
// Wraps the given OnceCallback and returns two OnceCallbacks with an identical
// signature. On first invocation of either returned callback, the original
// callback is invoked. Invoking the remaining callback results in a crash.
template <typename... Args>
std::pair<OnceCallback<void(Args...)>, OnceCallback<void(Args...)>>
SplitOnceCallback(OnceCallback<void(Args...)> callback) {
if (!callback) {
// Empty input begets two empty outputs.
return std::make_pair(OnceCallback<void(Args...)>(),
OnceCallback<void(Args...)>());
}
using Helper = internal::OnceCallbackHolder<Args...>;
auto wrapped_once = base::BindRepeating(
&Helper::Run, std::make_unique<Helper>(std::move(callback),
/*ignore_extra_runs=*/false));
return std::make_pair(wrapped_once, wrapped_once);
}
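A usage sketch (StartWork is a hypothetical API that reports through exactly one of two completion paths):

#include <utility>
#include "base/callback.h"
#include "base/callback_helpers.h"

void StartWork(base::OnceClosure on_success, base::OnceClosure on_error);

void SplitDemo(base::OnceClosure done) {
  auto pair = base::SplitOnceCallback(std::move(done));
  // Whichever half runs first invokes |done|; running the other half later
  // would CHECK-fail, so this is only safe when exactly one path ever runs.
  StartWork(std::move(pair.first), std::move(pair.second));
}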
// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
// that the Closure is executed no matter how the current scope exits.
// If you are looking for "ScopedCallback", "CallbackRunner", or
// "CallbackScoper" this is the class you want.
class BASE_EXPORT ScopedClosureRunner {
public:
ScopedClosureRunner();
explicit ScopedClosureRunner(OnceClosure closure);
ScopedClosureRunner(ScopedClosureRunner&& other);
// Runs the current closure if it's set, then replaces it with the closure
// from |other|. This is akin to how unique_ptr frees the contained pointer in
// its move assignment operator. If you need to explicitly avoid running any
// current closure, use ReplaceClosure().
ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
~ScopedClosureRunner();
explicit operator bool() const { return !!closure_; }
// Calls the current closure and resets it, so it won't be called again.
void RunAndReset();
// Replaces the closure with the new one, releasing the old one without calling it.
void ReplaceClosure(OnceClosure closure);
// Releases the Closure without calling.
[[nodiscard]] OnceClosure Release();
private:
OnceClosure closure_;
};
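A usage sketch of ScopedClosureRunner (names illustrative):

#include "base/bind.h"
#include "base/callback_helpers.h"

void ScopedRunnerDemo() {
  bool cleaned_up = false;
  {
    // The bound closure runs when |runner| leaves scope, on every exit path,
    // including early returns.
    base::ScopedClosureRunner runner(
        base::BindOnce([](bool* flag) { *flag = true; }, &cleaned_up));
    // ... work that may return early ...
  }
  // |cleaned_up| is now true.
}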
// Returns a placeholder type that will implicitly convert into a null callback,
// similar to how absl::nullopt / std::nullptr_t work in conjunction with
// absl::optional and various smart pointer types.
constexpr auto NullCallback() {
return internal::NullCallbackTag();
}
// Returns a placeholder type that will implicitly convert into a callback that
// does nothing, similar to how absl::nullopt / std::nullptr_t work in conjunction
// with absl::optional and various smart pointer types.
constexpr auto DoNothing() {
return internal::DoNothingCallbackTag();
}
// Similar to the above, but with a type hint. Useful for disambiguating
// among multiple function overloads that take callbacks with different
// signatures:
//
// void F(base::OnceCallback<void()> callback); // 1
// void F(base::OnceCallback<void(int)> callback); // 2
//
// F(base::NullCallbackAs<void()>()); // calls 1
// F(base::DoNothingAs<void(int)>()); // calls 2
template <typename Signature>
constexpr auto NullCallbackAs() {
return internal::NullCallbackTag::WithSignature<Signature>();
}
template <typename Signature>
constexpr auto DoNothingAs() {
return internal::DoNothingCallbackTag::WithSignature<Signature>();
}
// Useful for creating a Closure that will delete a pointer when invoked. Only
// use this when necessary. In most cases MessageLoop::DeleteSoon() is a better
// fit.
template <typename T>
void DeletePointer(T* obj) {
delete obj;
}
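A usage sketch (Widget is illustrative):

#include <utility>
#include "base/bind.h"
#include "base/callback.h"

struct Widget {};

void DeletePointerDemo() {
  Widget* widget = new Widget;
  // Typically such a closure is handed to another component that decides
  // when destruction happens.
  base::OnceClosure deleter =
      base::BindOnce(&base::DeletePointer<Widget>, widget);
  std::move(deleter).Run();  // Deletes |widget|.
}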
} // namespace base
 #endif  // BASE_CALLBACK_HELPERS_H_


@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#include "base/functional/callback_internal.h"
+#include "base/callback_internal.h"
 #include "base/check.h"
 #include "base/notreached.h"


@@ -5,13 +5,13 @@
 // This file contains utility functions and classes that help the
 // implementation and management of the Callback objects.
-#ifndef BASE_FUNCTIONAL_CALLBACK_INTERNAL_H_
-#define BASE_FUNCTIONAL_CALLBACK_INTERNAL_H_
+#ifndef BASE_CALLBACK_INTERNAL_H_
+#define BASE_CALLBACK_INTERNAL_H_
 #include <utility>
 #include "base/base_export.h"
-#include "base/functional/callback_forward.h"
+#include "base/callback_forward.h"
 #include "base/memory/ref_counted.h"
 namespace base {
@@ -60,7 +60,7 @@ class BASE_EXPORT BindStateBase
   MAYBE_VALID,
 };
-using InvokeFuncStorage = void (*)();
+using InvokeFuncStorage = void(*)();
 BindStateBase(const BindStateBase&) = delete;
 BindStateBase& operator=(const BindStateBase&) = delete;
@@ -247,4 +247,4 @@ struct ThenHelper<OriginalCallback<OriginalR(OriginalArgs...)>,
 }  // namespace internal
 }  // namespace base
-#endif  // BASE_FUNCTIONAL_CALLBACK_INTERNAL_H_
+#endif  // BASE_CALLBACK_INTERNAL_H_


@@ -49,9 +49,9 @@
 #include "base/bind.h"
 #include "base/callback.h"
+#include "base/callback_internal.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
-#include "base/functional/callback_internal.h"
 #include "base/memory/weak_ptr.h"
 namespace base {


@@ -99,10 +99,6 @@ class BASE_EXPORT CheckError {
   LogMessage* const log_message_;
 };
-#define CHECK_FUNCTION_IMPL(check_function, condition) \
-  LAZY_CHECK_STREAM(check_function(__FILE__, __LINE__, #condition).stream(), \
-                    !ANALYZER_ASSUME_TRUE(condition))
 #if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
     !BUILDFLAG(DCHECK_IS_CONFIGURABLE)
@@ -123,22 +119,31 @@ class BASE_EXPORT CheckError {
 #else
+#define CHECK(condition) \
+  LAZY_CHECK_STREAM( \
+      ::logging::CheckError::Check(__FILE__, __LINE__, #condition).stream(), \
+      !ANALYZER_ASSUME_TRUE(condition))
 #define CHECK_WILL_STREAM() true
-#define CHECK(condition) \
-  CHECK_FUNCTION_IMPL(::logging::CheckError::Check, condition)
 #define PCHECK(condition) \
-  CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck, condition)
+  LAZY_CHECK_STREAM( \
+      ::logging::CheckError::PCheck(__FILE__, __LINE__, #condition).stream(), \
+      !ANALYZER_ASSUME_TRUE(condition))
 #endif
 #if DCHECK_IS_ON()
 #define DCHECK(condition) \
-  CHECK_FUNCTION_IMPL(::logging::CheckError::DCheck, condition)
+  LAZY_CHECK_STREAM( \
+      ::logging::CheckError::DCheck(__FILE__, __LINE__, #condition).stream(), \
+      !ANALYZER_ASSUME_TRUE(condition))
 #define DPCHECK(condition) \
-  CHECK_FUNCTION_IMPL(::logging::CheckError::DPCheck, condition)
+  LAZY_CHECK_STREAM( \
+      ::logging::CheckError::DPCheck(__FILE__, __LINE__, #condition).stream(), \
+      !ANALYZER_ASSUME_TRUE(condition))
 #else
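Call sites are unaffected by this refactoring; a usage sketch of the lazy streaming behavior both versions implement:

#include <string>
#include "base/check.h"

void CheckDemo(int fd, const std::string& path) {
  // The stream operands after << are evaluated lazily: |path| is only
  // formatted into a log message if the condition is false.
  CHECK(fd >= 0) << "open() failed for " << path;
  DCHECK(!path.empty()) << "empty path";  // Only active when DCHECK_IS_ON().
}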


@@ -140,7 +140,8 @@ class CheckOpResult {
   char* message_ = nullptr;
 };
-#if !CHECK_WILL_STREAM()
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
+    !BUILDFLAG(DCHECK_IS_CONFIGURABLE)
 // Discard log strings to reduce code bloat.
 #define CHECK_OP(name, op, val1, val2) CHECK((val1)op(val2))
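A sketch of what this means at a call site (assuming the usual CHECK_EQ family built on CHECK_OP):

#include <cstddef>
#include "base/check_op.h"

void CheckOpDemo(size_t produced, size_t consumed) {
  // With streaming enabled this logs both operand values on failure; in the
  // log-discarding configuration above it collapses to
  // CHECK((produced) == (consumed)).
  CHECK_EQ(produced, consumed);
}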


@@ -510,7 +510,6 @@ void CommandLine::ParseFromString(StringPieceType command_line) {
   if (downlevel_shell32_dll)
     ::FreeLibrary(downlevel_shell32_dll);
 }
 #endif  // BUILDFLAG(IS_WIN)
 void CommandLine::AppendSwitchesAndArguments(
@@ -651,7 +650,6 @@ void CommandLine::ParseAsSingleArgument(
       single_arg_switch_position + single_arg_switch.length() + 1;
   if (arg_position >= raw_command_line_string_.length())
     return;
-  has_single_argument_switch_ = true;
   const StringPieceType arg = raw_command_line_string_.substr(arg_position);
   if (!arg.empty()) {
     AppendArgNative(arg);


@@ -208,11 +208,6 @@ class BASE_EXPORT CommandLine {
   // Initialize by parsing the given command line string.
   // The program name is assumed to be the first item in the string.
   void ParseFromString(StringPieceType command_line);
-  // Returns true if the command line had the --single-argument switch, and
-  // thus likely came from a Windows shell registration. This is only set if the
-  // command line is parsed, and is not changed after it is parsed.
-  bool HasSingleArgumentSwitch() const { return has_single_argument_switch_; }
 #endif
   // Sets a delegate that's called when we encounter a duplicate switch
@@ -252,11 +247,6 @@ class BASE_EXPORT CommandLine {
   // ParseFromString(). Empty if this command line was not parsed from a string,
   // or if ParseFromString() has finished executing.
   StringPieceType raw_command_line_string_;
-  // Set to true if the command line had --single-argument when initially
-  // parsed. It does not change if the command line mutates after initial
-  // parsing.
-  bool has_single_argument_switch_ = false;
 #endif
   // The singleton CommandLine representing the current process's command line.
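For context, a sketch of how a --single-argument command line is parsed on Windows (the program name and path are illustrative):

#include "base/command_line.h"

void SingleArgumentDemo() {
  // Everything after --single-argument is taken verbatim as one argument,
  // spaces and all; this is how shell-registered handlers pass an unquoted
  // file path.
  base::CommandLine cl = base::CommandLine::FromString(
      L"prog.exe --single-argument C:\\dir with spaces\\page.html");
}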


@@ -406,13 +406,9 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
 #define GSL_POINTER
 #endif
-// Adds the "logically_const" tag to a symbol's mangled name. The "Mutable
-// Constants" check [1] detects instances of constants that aren't in .rodata,
-// e.g. due to a missing `const`. Using this tag suppresses the check for this
-// symbol, allowing it to live outside .rodata without a warning.
-//
-// [1]:
-// https://crsrc.org/c/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants
+// Adds the "logically_const" tag to a symbol's mangled name, which can be
+// recognized by the "Mutable Constants" check
+// (https://chromium.googlesource.com/chromium/src/+/main/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants).
 #if defined(COMPILER_GCC) || defined(__clang__)
 #define LOGICALLY_CONST [[gnu::abi_tag("logically_const")]]
 #else


@@ -11,6 +11,7 @@
 #include <type_traits>
 #include <utility>
+#include "base/as_const.h"
 #include "base/check.h"
 #include "base/containers/vector_buffer.h"
 #include "base/dcheck_is_on.h"
@@ -528,11 +529,11 @@ class circular_deque {
     return buffer_[i - right_size];
   }
   value_type& at(size_type i) {
-    return const_cast<value_type&>(std::as_const(*this).at(i));
+    return const_cast<value_type&>(base::as_const(*this).at(i));
   }
   value_type& operator[](size_type i) {
-    return const_cast<value_type&>(std::as_const(*this)[i]);
+    return const_cast<value_type&>(base::as_const(*this)[i]);
   }
   const value_type& operator[](size_type i) const { return at(i); }
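The const-overload delegation idiom used above, shown in isolation (FixedBuffer is illustrative; base::as_const on the right-hand side of the diff is the pre-C++17 shim for std::as_const):

#include <cstddef>
#include <utility>

template <typename T>
class FixedBuffer {
 public:
  const T& at(size_t i) const { return data_[i]; }
  // The non-const accessor forwards to the const overload and casts the
  // constness away from the result, so the bounds logic lives in one place.
  T& at(size_t i) { return const_cast<T&>(std::as_const(*this).at(i)); }

 private:
  T data_[16] = {};
};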


@@ -12,6 +12,7 @@
 #include <type_traits>
 #include <utility>
+#include "base/as_const.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
 #include "base/functional/not_fn.h"
@@ -950,7 +951,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(const K& key)
     -> iterator {
-  return const_cast_it(std::as_const(*this).find(key));
+  return const_cast_it(base::as_const(*this).find(key));
 }
 template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@@ -973,7 +974,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
     const K& key) -> std::pair<iterator, iterator> {
-  auto res = std::as_const(*this).equal_range(key);
+  auto res = base::as_const(*this).equal_range(key);
   return {const_cast_it(res.first), const_cast_it(res.second)};
 }
@@ -994,7 +995,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
     const K& key) -> iterator {
-  return const_cast_it(std::as_const(*this).lower_bound(key));
+  return const_cast_it(base::as_const(*this).lower_bound(key));
 }
 template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
@@ -1015,7 +1016,7 @@ template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
 template <typename K>
 auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
     const K& key) -> iterator {
-  return const_cast_it(std::as_const(*this).upper_bound(key));
+  return const_cast_it(base::as_const(*this).upper_bound(key));
 }
 template <class Key, class GetKeyFromValue, class KeyCompare, class Container>


@@ -300,8 +300,6 @@ void CPU::Initialize(bool require_branding) {
     has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
     has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
     has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
-    has_pku_ = (cpu_info7[2] & 0x00000008) != 0;
   }
   // Get the brand string of the cpu.


@@ -102,13 +102,6 @@ class BASE_EXPORT CPU final {
   constexpr bool has_bti() const { return false; }
 #endif
-#if defined(ARCH_CPU_X86_FAMILY)
-  // Memory protection key support for user-mode pages
-  bool has_pku() const { return has_pku_; }
-#else
-  constexpr bool has_pku() const { return false; }
-#endif
 #if defined(ARCH_CPU_X86_FAMILY)
   IntelMicroArchitecture GetIntelMicroArchitecture() const;
 #endif
@@ -199,9 +192,6 @@ class BASE_EXPORT CPU final {
 #if defined(ARCH_CPU_ARM_FAMILY)
   bool has_mte_ = false;  // Armv8.5-A MTE (Memory Tagging Extension)
   bool has_bti_ = false;  // Armv8.5-A BTI (Branch Target Identification)
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
-  bool has_pku_ = false;
 #endif
   bool has_non_stop_time_stamp_counter_ = false;
   bool is_running_in_vm_ = false;
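A usage sketch of the feature-detection accessors this class exposes:

#include "base/cpu.h"

void CpuDemo() {
  base::CPU cpu;  // Probes CPUID (and similar sources) in the constructor.
  if (cpu.has_avx2()) {
    // Dispatch to an AVX2-optimized code path.
  }
}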


@@ -19,9 +19,8 @@ namespace {
 // Whether to enable a series of optimizations that reduce total CPU
 // utilization.
-BASE_FEATURE(kReduceCpuUtilization,
-             "ReduceCpuUtilization",
-             FEATURE_DISABLED_BY_DEFAULT);
+constexpr Feature kReduceCpuUtilization{"ReduceCpuUtilization",
+                                        FEATURE_DISABLED_BY_DEFAULT};
 class CpuReductionExperimentSubSampler {
  public:
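Runtime queries are unchanged by the declaration-style difference above; a sketch (assuming the feature constant is visible in the same translation unit):

#include "base/feature_list.h"

void MaybeReduceCpu() {
  if (base::FeatureList::IsEnabled(kReduceCpuUtilization)) {
    // Take the reduced-CPU-utilization path.
  }
}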


@@ -1,21 +0,0 @@
-// Copyright 2022 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef BASE_CXX20_IS_CONSTANT_EVALUATED_H_
-#define BASE_CXX20_IS_CONSTANT_EVALUATED_H_
-namespace base {
-// Implementation of C++20's std::is_constant_evaluated.
-//
-// References:
-// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
-// - https://wg21.link/meta.const.eval
-constexpr bool is_constant_evaluated() noexcept {
-  return __builtin_is_constant_evaluated();
-}
-}  // namespace base
-#endif  // BASE_CXX20_IS_CONSTANT_EVALUATED_H_
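A sketch of what the removed helper enabled (PortableStrlen is illustrative): branching between a constexpr-friendly loop and the optimized libc routine depending on evaluation context.

#include <cstddef>
#include <cstring>
#include "base/cxx20_is_constant_evaluated.h"

constexpr size_t PortableStrlen(const char* s) {
  if (base::is_constant_evaluated()) {
    // strlen() is not constexpr, so count by hand during constant evaluation.
    size_t n = 0;
    while (s[n] != '\0')
      ++n;
    return n;
  }
  return strlen(s);  // At runtime, use the optimized library routine.
}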
