Compare commits


No commits in common. "31f169a0b21190627ab144906820a4310097e54c" and "9d972a87da01118c2926a54604bdd10b51ea7a4f" have entirely different histories.

1313 changed files with 13310 additions and 22591 deletions

View File

@@ -1 +1 @@
109.0.5414.74
108.0.5359.94

View File

@@ -58,7 +58,7 @@ default_args = {
# Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 10
fuchsia_target_api_level = 9
devtools_visibility = [ "*" ]
}
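
For context, a `default_args` scope in the `.gn` dotfile overrides the default values of build arguments that imported files declare with `declare_args()` (user-set `gn args` still take precedence over both). A minimal GN sketch of that relationship, with hypothetical file names and defaults:

# sdk_args.gni (hypothetical): declares the argument and its stock default.
declare_args() {
  # Fuchsia API level to target; -1 here is an illustrative placeholder.
  fuchsia_target_api_level = -1
}

# .gn dotfile (hypothetical excerpt): pins a different default checkout-wide.
default_args = {
  fuchsia_target_api_level = 9
}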

View File

@@ -134,7 +134,6 @@ Arnaud Mandy <arnaud.mandy@intel.com>
Arnaud Renevier <a.renevier@samsung.com>
Arpita Bahuguna <a.bah@samsung.com>
Arthur Lussos <developer0420@gmail.com>
Artur Akerberg <artur.aker@gmail.com>
Arun Kulkarni <kulkarni.a@samsung.com>
Arun Kumar <arun87.kumar@samsung.com>
Arun Mankuzhi <arun.m@samsung.com>
@@ -316,7 +315,6 @@ Dmitry Sokolov <dimanne@gmail.com>
Dominic Farolino <domfarolino@gmail.com>
Dominic Jodoin <dominic.jodoin@gmail.com>
Dominik Röttsches <dominik.rottsches@intel.com>
Dominik Schütz <do.sch.dev@gmail.com>
Don Woodward <woodward@adobe.com>
Donghee Na <corona10@gmail.com>
Dong-hee Na <donghee.na92@gmail.com>
@@ -516,7 +514,6 @@ Jan Grulich <grulja@gmail.com>
Jan Keitel <jan.keitel@gmail.com>
Jan Rucka <ruckajan10@gmail.com>
Jan Sauer <jan@jansauer.de>
Jan-Michael Brummer <jan.brummer@tabos.org>
Janusz Majnert <jmajnert@gmail.com>
Janwar Dinata <j.dinata@gmail.com>
Jared Shumway <jaredshumway94@gmail.com>
@@ -601,7 +598,6 @@ Jongdeok Kim <jongdeok.kim@navercorp.com>
Jongheon Kim <sapzape@gmail.com>
JongKwon Lee <jongkwon.lee@navercorp.com>
Jongmok Kim <jongmok.kim@navercorp.com>
Jongmok Kim <johny.kimc@gmail.com>
Jongsoo Lee <leejongsoo@gmail.com>
Joone Hur <joone.hur@intel.com>
Joonghun Park <pjh0718@gmail.com>
@@ -1452,7 +1448,6 @@ Synaptics <*@synaptics.com>
Tableau Software <*@tableau.com>
Talon Cyber Security Ltd. <*@talon-sec.com>
TeamSpeak Systems GmbH <*@teamspeak.com>
The Browser Company <*@thebrowser.company>
The Chromium Authors <*@chromium.org>
The MathWorks, Inc. <binod.pant@mathworks.com>
THEO Technologies <*@theoplayer.com>

src/DEPS
View File

@@ -43,7 +43,6 @@ gclient_gn_args = [
'checkout_nacl',
'checkout_openxr',
'checkout_rts_model',
'checkout_src_internal',
'cros_boards',
'cros_boards_with_qemu_images',
'generate_location_tags',
@@ -104,10 +103,8 @@ vars = {
# restricted to Googlers only.
'checkout_chromium_fsc_test_dependencies': False,
# By default, check out //clank in chromium instead of src-internal. This is
# part of an internal migration (see http://go/clank>src) and will be removed
# after the migration completes. See https://crbug.com/1315715 for context.
'checkout_clank_via_src_internal': False,
# By default, src-internal checks out //clank.
'checkout_clank_via_src_internal': True,
# By default, do not check out Google Benchmark. The library is only used by a
# few specialized benchmarks that most developers do not interact with. Will
@@ -174,6 +171,7 @@ vars = {
# support for other platforms may be added in the future.
'checkout_openxr' : 'checkout_win',
'checkout_traffic_annotation_tools': 'checkout_configuration != "small"',
'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration != "small"',
# By default bot checkouts the WPR archive files only when this
@@ -187,32 +185,32 @@ vars = {
# Fetches only the SDK boot images that match at least one of the
# entries in a comma-separated list.
# Wildcards are supported (e.g. "qemu.*").
#
# Available images:
# Emulation:
# - core.x64-dfv2
# - terminal.qemu-x64
# - terminal.qemu-arm64
# - workstation.qemu-x64
# - qemu.x64 (pulls terminal.qemu-x64-release)
# - qemu.arm64 (pulls terminal.qemu-arm64-release)
# - workstation.qemu-x64-release
# Hardware:
# - workstation_eng.chromebook-x64
# - workstation_eng.chromebook-x64-dfv2
# - generic.x64 (pulls terminal.x64-debug)
# - generic.arm64 (pulls terminal.arm64-debug)
# - chromebook.x64 (pulls terminal.chromebook-x64-debug)
#
# Since the images are hundreds of MB, default to only downloading the image
# most commonly useful for developers. Bots and developers that need to use
# other images can override this with additional images.
'checkout_fuchsia_boot_images': "terminal.qemu-x64",
'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""',
# other images (e.g., qemu.arm64) can override this with additional images.
'checkout_fuchsia_boot_images': "qemu.x64",
# By default, do not check out files required to run fuchsia tests in
# qemu on linux-arm64 machines.
'checkout_fuchsia_for_arm64_host': False,
# Revision of Crubit (trunk on 2022-10-15). This should typically be the
# Revision of Crubit (trunk on 2022-08-26). This should typically be the
# same as the revision specified in CRUBIT_REVISION in
# tools/rust/update_rust.py. More details and roll instructions can be
# found in tools/rust/README.md.
'crubit_revision': 'f5cbdf4b54b0e6b9f63a4464a2c901c82e0f0209',
'crubit_revision': '2c34caee7c3b4c2dfbcb0e935efcbc05ebc0f61d',
# By default, download the fuchsia sdk from the public sdk directory.
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
@@ -252,7 +250,7 @@ vars = {
# luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:f8f64a8c560d2bf68a3ad1137979d17cffb36d30',
'luci_go': 'git_revision:9f65ffe719f73af390727d369b342c22fa37ea54',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision.
@@ -283,7 +281,7 @@ vars = {
'dawn_standalone': False,
# reclient CIPD package version
'reclient_version': 're_client_version:0.85.0.91db7be-gomaip',
'reclient_version': 're_client_version:0.81.1.0853992-gomaip',
# Fetch Rust-related packages.
'use_rust': False,
@@ -306,34 +304,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': '9f5ce0f89e2ba479fdb57eebd13204bbed86922b',
'skia_revision': '7c55be996a81ff9c5c66984c9d4ef85d12a44c8c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': '4b4e473387ed62f7fcbc95a3bf05244ea0e76a0a',
'v8_revision': '3155b0d10c058d2a9f1d7bba00ad398b3e03b841',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': 'aa63ea230e0c507e7b4b164a30e502fb17168c17',
'angle_revision': 'ceec659ac60b0c8ee9d9c602ca1a878ec1d3a88f',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': 'dd7bb92b9a7a813ebc2da9fe3f6484c34cc69363',
'swiftshader_revision': 'b22b1b1f2dddcf5eacc8d2a37e7d27f650e1c1e2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': '89aa9f9ebe803480ec697fb39470ed106869e272',
'pdfium_revision': '9d2c662f557544e5edb74a60b52fb297f4c5dfee',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '1ccef4908ce04adc6d246262846f3cd8a111fa44',
'boringssl_revision': '1ee71185a2322dc354bee5e5a0abfb1810a27dc6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:10.20221110.0.1',
'fuchsia_version': 'version:9.20221006.5.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@@ -353,11 +351,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other.
'nacl_revision': '9b5a059df9f0015af4a013b6dc217581abf5ce49',
'nacl_revision': '7d275eb5878e29cf3d7338617c1e0c5e5856195a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': 'dea2e6358b2f963008d447d27564dd79890b61f0',
'freetype_revision': '0b62c1e43dc4b0e3c50662aac757e4f7321e5466',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
@@ -365,7 +363,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling HarfBuzz
# and whatever else without interference from each other.
'harfbuzz_revision': '2822b589bc837fae6f66233e2cf2eef0f6ce8470',
'harfbuzz_revision': '56c467093598ec559a7148b61e112e9de52b7076',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Emoji Segmenter
# and whatever else without interference from each other.
@@ -377,7 +375,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': '037b0ac25d75c1274af5aa2e3c8316f81b99d5be',
'catapult_revision': '4793433248183dd073e608f655204d4acfdc7193',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
@@ -385,7 +383,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': '74ceee1e9a0f73b817709ee8acdf08608a6f0f6f',
'devtools_frontend_revision': '33bb29b551b54b0ac67025e8b3e0ce69352c9504',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
@@ -421,11 +419,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': 'd905232daafd9501ac1ff322b0eafb0cf92137a1',
'dawn_revision': 'c84d06e8603ce9c4b5c8d86e42e9ec0acf3bd689',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': 'a17968ca8acf183aa58529312f7a888d1157075f',
'quiche_revision': 'a338ea8277642f6d78022dc8e3aaed182a804413',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@@ -441,11 +439,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other.
'libavif_revision': 'cd03eef10a7cc6081ee98e3911e21d977a2bf17b',
'libavif_revision': 'de7e6c0d98abcd6843c4a9bf4cee731141dca566',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': '8c8e5d7e6f72dacd0917fdf5a3fab3e5b19ce93c',
'nearby_revision': '4bd0337c105c502de845ba9501ad6e0350f613b9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@@ -457,7 +455,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'cros_components_revision': '97e45e933f8f9f584a4a8caf59b0e517a0dd5324',
'cros_components_revision': 'a0979aacb8744f42ed7abd966a6b0ac7578a73e9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@@ -465,11 +463,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': '1a32724f721e1c3b6c590a07fe4a954344f15e48',
'libcxxabi_revision': '9572e56a12c88c011d504a707ca94952be4664f9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': '86213b7902ad64cad566fc4878024ad26654d56a',
'libunwind_revision': '1111799723f6a003e6f52202b9bf84387c552081',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@@ -485,14 +483,14 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': 'fd5b30c4bcf95ee607caf47ef65c3b96210867a4',
'ffmpeg_revision': 'b9f01c3c54576330b2cf8918c54d5ee5be8faefe',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': '4218f3525ad438b22b0e173d963515a09d143398',
'libcxx_revision': '64d36e572d3f9719c5d75011a718f33f11126851',
# GN CIPD package version.
'gn_version': 'git_revision:1c4151ff5c1d6fbf7fa800b8d4bb34d3abc03a41',
'gn_version': 'git_revision:b9c6c19be95a3863e02f00f1fe403b2502e345b6',
# ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@@ -585,7 +583,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_rust_toolchain/linux-amd64',
'version': 'version:2@1.64.0.cr2',
'version': 'rMU9JFlwRfB-5VEWgDPRFYme5sXSnsHPSE3uQXf1xBQC',
},
],
'dep_type': 'cipd',
@@ -596,7 +594,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/rust_src',
'version': 'version:2@2022-11-01',
'version': 'version:2@2022-09-14',
},
],
'dep_type': 'cipd',
@@ -665,7 +663,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': 'Tq8hROm9IzgBhhsDQWSUldygcSqjDL6WtdDdH9GR1iEC',
'version': '29MbwZukN0c7nlUhmVKLU6ecK99dCu-ZwYa3ICqbwB0C',
},
],
}
@@ -676,7 +674,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
'version': 'owuQ3SVzmABohvT4jYdn9RVH3gKn107su-8SrUO5Xc0C',
'version': 'E3rEUfkgLutRcZKGPJN_yWoC1G-4rTIhzpXGcsUNqCsC',
},
],
},
@@ -687,7 +685,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': '6VUdLZPVaa6OooJAO-6YWG2e7xR413E2OYukrtvM59sC',
'version': 'rqP-urpwa5NOuHhuLVNHyT9d_Psk1xDc8ELSstaIkUUC',
},
],
},
@@ -698,7 +696,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'SGIHWG5UocK__HcIUQA3wN_Web_G8Vad6N7Qv_G-E7UC',
'version': '7nSN9jjsZ507lwEcJQKUFM_Z2wHmjJmU3nzo1s-r8-UC',
},
],
},
@@ -710,7 +708,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': 'K1bKNzAlTMt_EyMNYVUvaED4IX5DnVlVWtgHRoglNOAC',
'version': 'PAJDUVfx7D-m1AKSaBeX8wQEyyQZcIkgXlD3owmRYk0C',
},
],
},
@@ -722,7 +720,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'lRa4lm3yGtDVOKin75GKxFu1BZFF3aNadGkm8gow4iYC',
'version': '_N_FWxzXW2IQSQ8OPbpCphEWyNwYs4LKR2gMQzpjhekC',
},
],
},
@@ -733,7 +731,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'EYKR6SE965eKhz9JBv_pfJol97nN5UdqymhpRRU2w2YC',
'version': '2yELAOdPaRyB3HuFsiecHXc4zcXVupx9cLa9ZAh-Z2wC',
},
],
},
@@ -744,7 +742,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'MkKzihmknEOqtwuFmCdXA-TsgU0FBh7jWl3LHGLptYcC',
'version': 'vuc_q-ghg3H11b1O-ENURYlDO8hrcpCc4AuN1Expx3gC',
},
],
},
@@ -753,7 +751,7 @@ deps = {
'packages': [
{
'package': 'chromium/chrome/test/data/autofill/captured_sites',
'version': '9irVgwFBxE4NZ2CLPRDV_-kx70oN-BhhTAWqdFFBD3oC',
'version': 'JT0XFwfg09pcNqt5n56Ki9hpRPgfGDSuDd1DRGBSpoYC',
}
],
'condition': 'checkout_chromium_autofill_test_dependencies',
@@ -816,16 +814,16 @@ deps = {
'src/clank': {
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'63df73663f14a3236a32a3a7f006a9b0cf474467',
'cf43b2bf3206ff908b2d17be5baba31b7b19f5d3',
'condition': 'checkout_android and checkout_src_internal and not checkout_clank_via_src_internal',
},
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '5bef1125e998d33bfb101ac0da9a6bc445121955',
'url': Var('chromium_git') + '/website.git' + '@' + '7da061134f35c390ac1549a82704a1762f9a5261',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '99ba3b6ed7b8489899f06a0d602e84fc657e8338',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + 'abd4e95736740cf61d2c63223396e163d3f08415',
'condition': 'checkout_ios',
},
@@ -840,7 +838,7 @@ deps = {
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '71dbf5f8aedb89064e40d0004427ea56c359d2f0',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'e7619686aab6b4e438ab51cd3fe03396b2f872c6',
'condition': 'checkout_ios',
},
@@ -910,7 +908,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'i5oOPUG4rt853hoMLBIzWPppsZTMIg11aeqwZyfJ0W4C',
'version': 'gjjgFT1JcYKD-SV0nFWRTeGr2kufiafn_rvDI-gFW0QC',
},
],
'dep_type': 'cipd',
@@ -921,7 +919,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'j7fpwOzWh0F-x9h9dV5tf36WBes999-jw_Swy93vPvEC',
'version': 'xH8MfShB-S7HYkM3gLOUa916ukoEtDJa-8X1bOwfevsC',
},
],
'dep_type': 'cipd',
@@ -932,7 +930,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'ktlIC4pstyfRj9NB4gQNSuNijjGLYkcrsrfkXrzxHt8C',
'version': 'SWCvrm3LQO_Y0XbcVVs0q2CJOVKn0ImNLJ0WPQDKx5YC',
},
],
'dep_type': 'cipd',
@@ -1000,7 +998,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': '7fcu-OXUt3YtkM5bJ799qvx1sFSQAWOsynLMt9Z2NR4C',
'version': 'H4XoDJ7V7LZUIhvV2qwFHWYJoIY4MJkGQK-Q2vv-dq4C',
},
],
'condition': 'checkout_android',
@@ -1033,7 +1031,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/aapt2',
'version': '-QrdsGmvVhHeoRc5wKCnU2LXEjk1s0ocheitXWf5dhYC',
'version': 'nSnWUNu6ssPA-kPMvFQj4JjDXRWj2iubvvjfT1F6HCMC',
},
],
'condition': 'checkout_android',
@@ -1055,7 +1053,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'nrReBqFfE2B7DUv3RveTRIE4K9O3MJft7pP-iRhoRQ4C',
'version': 'IEZQhHFQzO9Ci1QxWZmssKqGmt2r_nCDMKr8t4cKY34C',
},
],
'condition': 'checkout_android',
@@ -1066,7 +1064,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'PwX3e0EPv-j0ip-tcNNfhy3YcFHle4vxKVo-IbTCtZYC',
'version': 'DO1bMH_JFEfZXSkAknIs7AfgNh4IwLtJaeMJTdzfuJwC',
},
],
'condition': 'checkout_android',
@@ -1077,7 +1075,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': '1_tPfyswsXgfbNegip6nTy55E2ASlhsRqA2sKhUo01YC',
'version': 'bUREd_PkCqlp2ww6zmyOLGf0jhqgbnf6GT4V1xkAZ10C',
},
],
'condition': 'checkout_android',
@@ -1148,7 +1146,7 @@ deps = {
},
'src/third_party/barhopper': {
'url': 'https://chrome-internal.googlesource.com/chrome/deps/barhopper.git' + '@' + '865bd06ef4a839b0a15d17e38e25f8911e4cdf9f',
'url': 'https://chrome-internal.googlesource.com/chrome/deps/barhopper.git' + '@' + '5830f9acc68275805d60d4b02bf8e1e3c600740d',
'condition': 'checkout_src_internal and checkout_chromeos',
},
@@ -1161,7 +1159,7 @@ deps = {
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'e059dad5ea3e781786f155ff9806602a4374b5fa',
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'e085b3b50bde862d0cf3ce4594e3f391bcf5faec',
'src/third_party/byte_buddy': {
'packages': [
@@ -1186,7 +1184,7 @@ deps = {
},
'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + 'f4628fda1b370eb238ae69545024d256ca62d719',
Var('chromium_git') + '/cast_core/public' + '@' + '469e045e514c09701ab674d023cbaa6562866f83',
'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@@ -1215,7 +1213,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '84153a0e264617a1a9054318ad32d4810f003acd',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '6ebc1b94de0dc73bba385f70ddffab9798fd59e5',
'condition': 'checkout_chromeos',
},
@@ -1233,7 +1231,7 @@ deps = {
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'ca1cd5d1bb30fbbd5c8b3eddeac77f0892b38516',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '67bca80707449bad87a17de8c937634ff1ab3272',
'condition': 'checkout_linux',
},
@@ -1243,13 +1241,13 @@ deps = {
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '9d351f8b5aa6d0362eae0c6d32b4e9b6ef3207bf',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '2c0a8c736a59044e4acc7be9e172343adc5c4310',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': {
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '01820318125c27714865061c28bd1c17e0ff08f4',
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '5b416729821b589991d492f0707a087f5a47bb1f',
'condition': 'checkout_src_internal',
},
@@ -1257,7 +1255,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '3bb6a48d8c171cf20b5f8e48bfb4e424fbd4f79e',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b3bf8d6a13585ff248c079402654647d298de60b',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
@@ -1285,7 +1283,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + 'af862024c8c8fa0ae07ced05e89013d881b00596',
'src/third_party/flatbuffers/src':
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'e3017029647a88eb6f509ee9744012fffeb0d371',
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + '0fe13cb28ce5a3fb81f654b21cb37c9821194962',
# Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': {
@@ -1294,7 +1292,7 @@ deps = {
},
'src/third_party/fp16/src':
Var('chromium_git') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91',
Var('chromium_git') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '4dfe081cf6bcd15db339cf2680b9281b8451eeb3',
'src/third_party/gemmlowp/src':
Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',
@@ -1450,7 +1448,7 @@ deps = {
+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', # release 1.9.4
'src/third_party/junit/src': {
'url': Var('chromium_git') + '/external/junit.git' + '@' + '05fe2a64f59127c02135be22f416e91260d6ede6',
'url': Var('chromium_git') + '/external/junit.git' + '@' + '64155f8a9babcfcf4263cf4d08253a1556e75481',
'condition': 'checkout_android',
},
@@ -1464,7 +1462,7 @@ deps = {
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + 'b42e001a9ca9805aff7aaaa270b364a8298c33b4',
Var('aomedia_git') + '/aom.git' + '@' + '4ebecefe77953f226e620821fe441e24547a121f',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1520,12 +1518,12 @@ deps = {
},
'src/third_party/libunwindstack': {
'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '4dbfa0e8c844c8e243b297bc185e54a99ff94f9e',
'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '8740b09bd1f8b81bdba92766afcb9df1d6a1f14e',
'condition': 'checkout_android',
},
'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + '5245f6e9cb7e6bb68ab45fe4d8b00bc9b16857e1',
Var('chromium_git') + '/webm/libvpx.git' + '@' + '9d6d0624d7943a09cc0be9df1a7402522989ac1a',
'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
@@ -1534,7 +1532,7 @@ deps = {
Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + 'fe9ced6e3c8ae6c69bcc3ebb8505a650d2df30e0',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '00950840d1c9bcbb3eb6ebc5aac5793e71166c8b',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1616,7 +1614,7 @@ deps = {
'src/third_party/nasm': {
'url': Var('chromium_git') + '/chromium/deps/nasm.git' + '@' +
'0873b2bae6a5388a1c55deac8456e3c60a47ca08'
'9215e8e1d0fe474ffd3e16c1a07a0f97089e6224'
},
'src/third_party/neon_2_sse/src':
@@ -1644,10 +1642,10 @@ deps = {
},
'src/third_party/openh264/src':
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9',
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '9be5eefa2605408a671cc11c695849400caecbbb',
Var('chromium_git') + '/openscreen' + '@' + '940f6edf1274146fa1bfbda146b98d6aa16a0887',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1664,7 +1662,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + '10498394b9f4b302ee5f56a08e41e7ba7016be44',
Var('android_git') + '/platform/external/perfetto.git' + '@' + '280f0b23c5c8b98248cf0ccf3d011c4fd4bb74f5',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1681,7 +1679,7 @@ deps = {
},
'src/third_party/quic_trace/src':
Var('chromium_git') + '/external/github.com/google/quic-trace.git' + '@' + 'caa0a6eaba816ecb737f9a70782b7c80b8ac8dbc',
Var('chromium_git') + '/external/github.com/google/quic-trace.git' + '@' + 'c7b993eb750e60c307e82f75763600d9c06a6de1',
'src/third_party/pywebsocket3/src':
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/pywebsocket3.git' + '@' + '50602a14f1b6da17e0b619833a13addc6ea78bc2',
@@ -1698,13 +1696,13 @@ deps = {
},
'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '7a65faf439295e941baa6640a717d89c1f13e9cd',
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'd2836d1b1c34c4e330a85a1006201db474bf2c8a',
'src/third_party/r8': {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': '2rVb5rDMAwVnV9ra0CSpHAjMqBI4eDQNz_EQ9XFf_60C',
'version': 'szXK3tCGU7smsNs4r2mGqxme7d9KWLaOk0_ghbCJxUQC',
},
],
'condition': 'checkout_android',
@@ -1718,7 +1716,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'qGtBu6TtxyR5XNy4cmsslb7c946YtkZF5_QCjVP-wc8C',
'version': 'Q3q0H5fP-O3El4ZE6Mg7vrySyorEF6YrGFs1gRr_PekC',
},
],
'condition': 'checkout_android',
@@ -1742,7 +1740,7 @@ deps = {
},
'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '3286a34cc8de6149ac6844107dfdffac91531e72',
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '841ea4172ba904fe3536789497f9565f2ef64129',
'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@@ -1796,20 +1794,20 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '11eaf0880146cdd6af17b38fb1361dd031ee0f8c',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '14b52bb67edccf9f250085f83cc0e8aad03824f0',
'src/third_party/turbine': {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 'HqLybI_r3dCgRJywsqJ3xkp2D6vQAI4-8D7zdqNiyxcC',
'version': 'rrpgWQ-uylo8c5IPgUVP464LwcVOmt29MqwsR59O_zkC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@16939fbb2bf748d2edd3ecf99eb2d8bf0a3ab382',
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@f310d85385dfddbe1deeb05deda1045593225710',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@@ -1846,10 +1844,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '9eb57da4a0252044feac56418f4a306f5c66880a',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'eba1a78f3d741241b0dbee728561b61e9587a686',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + '4e8a5ac68e8a4ae0588f54f2fdb8cbd1eb5fa50d',
Var('webrtc_git') + '/src.git' + '@' + '93081d594f7efff72958a79251f53731b99e902b',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@@ -1867,7 +1865,7 @@ deps = {
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a50369c0fdd15f0f35b1a91c964644327a88d480',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'e8f74a9763aa36559980a0c2f37f587794995622',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1876,7 +1874,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'VJBQvPCDWNHadBXHtx4iViP5ju0oF2ZlEK5Zle80ZVUC',
'version': 'c-P40DdzhvukIRQ1DgesE2cEEU8bTLcd4p_e3LL1--sC',
},
],
'dep_type': 'cipd',
@@ -1886,7 +1884,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': 'GXa5gfPznAu49j4sXrqFvYWFSD0LTIB_TVal2oqGlAIC',
'version': 'wql7tuE1euGE1rj5JPT6w6ev6KYL3hWzY6HggTHgKZ8C',
},
],
'dep_type': 'cipd',
@@ -1897,7 +1895,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': '1EpS2K-VevNOMzNiw1LmYktlqKVv1SyqDm2q4DRQtNkC',
'version': 'OJJWEma6n1Cw5Ja1DQfdwbOFoFVp6071BB8VjklDcyYC',
},
],
'dep_type': 'cipd',
@@ -1908,7 +1906,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'SXjqn6JoFOUmIaVPnyQWa19dyUeEb00rdwMuL9feyvoC',
'version': '9sTZ5XDqsy_Dj_v4NU3u4fLI_AGANp-zAJ3sof4rkwQC',
},
],
'dep_type': 'cipd',
@@ -1919,7 +1917,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@9d9a9db6849a20f032629df0ffd56f1e964a330f',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@a63cd655ad37984fa08e1c95ca73acf55550f10d',
'condition': 'checkout_src_internal',
},
@@ -1927,7 +1925,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/assistant/ambient',
'version': 'version:feel_the_breeze_with_frame_rate_markers',
'version': 'version:feel_the_breeze_tree_shadow_improvement',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1938,7 +1936,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'DGjNF7K8HNgqAoCrNTLYaL1cwGsGpgyMYJHLSIrCkQsC',
'version': '9yLWNtuRvV_dzod1dEYo01glLiFRGZ2yqhtYQapXSm4C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1949,7 +1947,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': 'nuBoTiVJnFg80DmCLFis-iioSzVfiEk4pK-juQ656QIC',
'version': '5MAo0K1bcfWGI4F8OuSplMAOM13HLHbGLL85j8dVU7AC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1960,7 +1958,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'fDMFUaVLxpDUliwvnwpNTNwhgVxwtX0LF2diuVfN4FcC',
'version': 'HfCwnAI0440kMmt917E1v9QJdzsNuNVfQQ86ehaVDscC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1971,7 +1969,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': 'BTIRR9os86YFvgh31Ufk0iak_hl3Zn-G19MwY5yMFFsC',
'version': 'TaHxBUmYiVurXIHHo8Y5mOh7-SEnHbSCW7fn60_Wm54C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -3100,6 +3098,17 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java',
'version': 'version:2@2.0.3.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_googlejavaformat_google_java_format': {
'packages': [
{
@@ -3862,11 +3871,13 @@ include_rules = [
'-third_party/shell-encryption',
'-third_party/private_membership',
# Abseil features are allowlisted explicitly. See
# //styleguide/c++/c++-features.md.
# Abseil features must be allowlisted explicitly for now. See
# //styleguide/c++/c++11.html. Allowed features' headers will be listed
# explicitly here.
'-absl',
'-third_party/abseil-cpp',
'+third_party/abseil-cpp/absl/base/attributes.h',
"+third_party/abseil-cpp/absl/functional/function_ref.h",
"+third_party/abseil-cpp/absl/numeric/int128.h",
'+third_party/abseil-cpp/absl/types/optional.h',
'+third_party/abseil-cpp/absl/types/variant.h',
@@ -4119,11 +4130,10 @@ hooks = [
},
{
# Should run after the clang hook. Used on mac, as well as for orderfile
# generation and size tooling on Android. Used by
# dump-static-initializers.py on linux.
# generation on Android.
'name': 'objdump',
'pattern': '.',
'condition': 'checkout_linux or checkout_mac or checkout_android and host_os != "mac"',
'condition': 'checkout_mac or checkout_android and host_os != "mac"',
'action': ['python3', 'src/tools/clang/scripts/update.py',
'--package=objdump'],
},
@@ -4506,6 +4516,38 @@ hooks = [
],
},
# This is used to ensure that all network operations are properly
# annotated so we can document what they're for.
{
'name': 'tools_traffic_annotation_linux',
'pattern': '.',
'condition': 'host_os == "linux" and checkout_traffic_annotation_tools',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--num_threads=4',
'--bucket', 'chromium-tools-traffic_annotation',
'-d', 'src/tools/traffic_annotation/bin/linux64',
],
},
# This is used to ensure that all network operations are properly
# annotated so we can document what they're for.
{
'name': 'tools_traffic_annotation_windows',
'pattern': '.',
'condition': 'host_os == "win" and checkout_traffic_annotation_tools',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--num_threads=4',
'--bucket', 'chromium-tools-traffic_annotation',
'-d', 'src/tools/traffic_annotation/bin/win32',
],
},
# Pull down Zucchini test data.
{
'name': 'zucchini_testdata',
@@ -4689,11 +4731,12 @@ hooks = [
{
'name': 'Download Fuchsia system images',
'pattern': '.',
'condition': 'checkout_fuchsia and checkout_fuchsia_product_bundles',
'condition': 'checkout_fuchsia',
'action': [
'python3',
'src/build/fuchsia/update_product_bundles.py',
'{checkout_fuchsia_boot_images}',
'src/build/fuchsia/update_images.py',
'--boot-images={checkout_fuchsia_boot_images}',
'--default-bucket={fuchsia_images_bucket}',
],
},
@@ -4959,7 +5002,7 @@ recursedeps = [
'src/third_party/vulkan-deps',
# src-internal has its own DEPS file to pull additional internal repos
'src-internal',
# clank has its own DEPS file, does not need to be in trybot_analyze_config
# since the roller does not run tests.
'src/clank',
# clank has its own DEPS file. This needs to be enabled only when it is
# removed from src-internal's recursedeps.
#'src/clank',
]

View File

@@ -18,7 +18,6 @@
# huge sequence of random-looking conditionals.
import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//base/trace_event/tracing.gni")
import("//build/buildflag_header.gni")
import("//build/config/arm.gni")
@@ -50,6 +49,9 @@ declare_args() {
# file name) is saved.
enable_location_source = true
# Whether or not the FROM_HERE macro uses base::Location::Current().
from_here_uses_location_builtins = true
# Unsafe developer build. Has developer-friendly features that may weaken or
# disable security measures like sandboxing or ASLR.
# IMPORTANT: Unsafe developer builds should never be distributed to end users.
@@ -86,7 +88,7 @@ dep_libevent = !is_fuchsia && !is_win && !is_mac && !is_nacl
# Determines whether message_pump_libevent should be used.
use_libevent = dep_libevent && !is_ios
if (is_android || is_robolectric) {
if (is_android) {
import("//build/config/android/rules.gni")
}
@@ -152,12 +154,6 @@ config("perfetto_config") {
}
}
if (enable_pkeys && is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
}
buildflag_header("ios_cronet_buildflags") {
header = "ios_cronet_buildflags.h"
header_dir = "base/message_loop"
@@ -363,7 +359,6 @@ mixed_component("base") {
"functional/callback_helpers.h",
"functional/callback_internal.cc",
"functional/callback_internal.h",
"functional/callback_tags.h",
"functional/function_ref.h",
"functional/identity.h",
"functional/invoke.h",
@@ -569,9 +564,6 @@ mixed_component("base") {
"power_monitor/sampling_event_source.h",
"power_monitor/timer_sampling_event_source.cc",
"power_monitor/timer_sampling_event_source.h",
"process/current_process.cc",
"process/current_process.h",
"process/current_process_test.h",
"process/environment_internal.cc",
"process/environment_internal.h",
"process/kill.cc",
@@ -611,6 +603,8 @@ mixed_component("base") {
"profiler/stack_copier_suspend.h",
"profiler/stack_sampler.cc",
"profiler/stack_sampler.h",
"profiler/stack_sampler_impl.cc",
"profiler/stack_sampler_impl.h",
"profiler/stack_sampling_profiler.cc",
"profiler/stack_sampling_profiler.h",
"profiler/suspendable_thread_delegate.h",
@@ -641,8 +635,6 @@ mixed_component("base") {
"scoped_native_library.cc",
"scoped_native_library.h",
"scoped_observation.h",
"scoped_observation_traits.h",
"scoped_observation_traits_internal.h",
"sequence_checker.cc",
"sequence_checker.h",
"sequence_checker_impl.cc",
@@ -679,7 +671,6 @@ mixed_component("base") {
"strings/string_util.cc",
"strings/string_util.h",
"strings/string_util_constants.cc",
"strings/string_util_impl_helpers.h",
"strings/string_util_internal.h",
"strings/stringize_macros.h",
"strings/stringprintf.cc",
@@ -705,7 +696,6 @@ mixed_component("base") {
"synchronization/lock.cc",
"synchronization/lock.h",
"synchronization/lock_impl.h",
"synchronization/waitable_event.cc",
"synchronization/waitable_event.h",
"synchronization/waitable_event_watcher.h",
"sys_byteorder.h",
@@ -775,6 +765,7 @@ mixed_component("base") {
"task/sequence_manager/task_queue_impl.h",
"task/sequence_manager/task_queue_selector.cc",
"task/sequence_manager/task_queue_selector.h",
"task/sequence_manager/task_queue_selector_logic.h",
"task/sequence_manager/task_time_observer.h",
"task/sequence_manager/tasks.cc",
"task/sequence_manager/tasks.h",
@@ -898,7 +889,6 @@ mixed_component("base") {
"threading/scoped_thread_priority.cc",
"threading/scoped_thread_priority.h",
"threading/sequence_bound.h",
"threading/sequence_bound_internal.h",
"threading/sequence_local_storage_map.cc",
"threading/sequence_local_storage_map.h",
"threading/sequence_local_storage_slot.cc",
@@ -969,7 +959,6 @@ mixed_component("base") {
"traits_bag.h",
"tuple.h",
"types/always_false.h",
"types/cxx23_to_underlying.h",
"types/expected.h",
"types/expected_internal.h",
"types/id_type.h",
@@ -1413,6 +1402,7 @@ mixed_component("base") {
"debug/proc_maps_linux.cc",
"debug/proc_maps_linux.h",
"files/dir_reader_linux.h",
"files/file_util_linux.cc",
"files/scoped_file_linux.cc",
"process/internal_linux.cc",
"process/internal_linux.h",
@@ -1559,7 +1549,7 @@ mixed_component("base") {
"//third_party/abseil-cpp:absl",
]
if (use_custom_libcxx && enable_safe_libcxx && !is_debug) {
if (use_custom_libcxx && !is_debug) {
public_deps += [ ":nodebug_assertion" ]
}
@@ -1613,7 +1603,7 @@ mixed_component("base") {
]
}
if (!use_partition_alloc_as_malloc) {
if (use_allocator == "none") {
if (is_android) {
sources += [ "allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc" ]
}
@@ -1739,45 +1729,7 @@ mixed_component("base") {
# This is actually a linker script, but it can be added to the link in the
# same way as a library.
libs += [ "android/library_loader/anchor_functions.lds" ]
} # is_android
if (is_robolectric) {
# Make jni.h available.
configs += [ "//third_party/jdk" ]
}
if (is_robolectric) {
sources += [
"android/base_jni_onload.cc",
"android/base_jni_onload.h",
"android/callback_android.cc",
"android/callback_android.h",
"android/command_line_android.cc",
"android/java_exception_reporter.cc",
"android/java_exception_reporter.h",
"android/jni_android.cc",
"android/jni_android.h",
"android/jni_array.cc",
"android/jni_array.h",
"android/jni_generator/jni_generator_helper.h",
"android/jni_int_wrapper.h",
"android/jni_registrar.cc",
"android/jni_registrar.h",
"android/jni_string.cc",
"android/jni_string.h",
"android/jni_utils.cc",
"android/jni_utils.h",
"android/jni_weak_ref.cc",
"android/jni_weak_ref.h",
"android/library_loader/library_loader_hooks.cc",
"android/library_loader/library_loader_hooks.h",
"android/native_uma_recorder.cc",
"android/scoped_java_ref.cc",
"android/scoped_java_ref.h",
"android/trace_event_binding.cc",
"android/trace_event_binding.h",
]
deps += [ ":base_jni_headers" ]
} # is_android || is_robolectric
# Chromeos.
if (is_chromeos) {
@@ -1890,7 +1842,6 @@ mixed_component("base") {
# by public //base headers, which requires they be on the include path.
# TODO(https://crbug.com/841171): Move these back to |deps|.
public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.component.runner",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.intl",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger",
@@ -2466,7 +2417,7 @@ mixed_component("base") {
]
}
if (use_partition_alloc_as_malloc) {
if (use_allocator == "partition") {
sources += [
"trace_event/address_space_dump_provider.cc",
"trace_event/address_space_dump_provider.h",
@@ -2499,7 +2450,6 @@ buildflag_header("cfi_buildflags") {
flags = [
# TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
"CFI_CAST_CHECK=$is_cfi && $use_cfi_cast",
"CFI_DIAG=$is_cfi && $use_cfi_diag",
"CFI_ICALL_CHECK=$is_cfi && $use_cfi_icall",
"CFI_ENFORCEMENT_TRAP=$is_cfi && !$use_cfi_diag",
"CFI_ENFORCEMENT_DIAGNOSTIC=$is_cfi && $use_cfi_diag && !$use_cfi_recover",
@@ -2517,6 +2467,7 @@ buildflag_header("debugging_buildflags") {
flags = [
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_LOCATION_SOURCE=$enable_location_source",
"FROM_HERE_USES_LOCATION_BUILTINS=$from_here_uses_location_builtins",
"ENABLE_PROFILING=$enable_profiling",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"UNSAFE_DEVELOPER_BUILD=$is_unsafe_developer_build",
@@ -2669,7 +2620,7 @@ static_library("base_static") {
}
}
if (use_custom_libcxx && enable_safe_libcxx && !is_debug) {
if (use_custom_libcxx && !is_debug) {
# nodebug_assertion.cc has to be in its own source_set instead of being
# included as a source in //base as otherwise its symbols won't be linked in
# if they end up in an archive.

View File

@@ -26,10 +26,3 @@ include_rules = [
# //base/util can use //base but not vice versa.
"-util",
]
specific_include_rules = {
# Special case
"process/current_process(|_test)\.h": [
"+third_party/perfetto/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h",
],
}

View File

@@ -3,29 +3,40 @@
# found in the LICENSE file.
import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
buildflag_header("buildflags") {
header = "buildflags.h"
_use_partition_alloc_as_malloc = use_allocator == "partition"
assert(use_allocator_shim || !_use_partition_alloc_as_malloc,
"Partition alloc requires the allocator shim")
assert(
!_use_partition_alloc_as_malloc ||
enable_partition_alloc_as_malloc_support,
"Partition alloc as malloc requires enable_partition_alloc_as_malloc_support=true")
assert(use_allocator_shim || !use_partition_alloc_as_malloc,
"PartitionAlloc-Everywhere requires the allocator shim")
# BackupRefPtr(BRP) build flags.
_use_backup_ref_ptr = use_backup_ref_ptr && use_partition_alloc && !is_nacl
_use_partition_alloc_as_gwp_asan_store = _use_backup_ref_ptr
# MTECheckedPtr is exclusive against BRP (asserted at declaration).
# MTECheckedPtr requires 64-bit pointers (not available in NaCl).
_use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl
flags = [
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc_as_malloc",
"USE_BACKUP_REF_PTR=$use_backup_ref_ptr",
"USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
"USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$use_backup_ref_ptr",
"USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$_use_partition_alloc_as_gwp_asan_store",
# Not to be used by itself - use in combination with
# defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
"USE_MTE_CHECKED_PTR=$use_mte_checked_ptr",
# Not to be used directly - see `partition_alloc_config.h`.
"USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
]
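
For context, these `flags` feed //build's `buildflag_header()` template: each `NAME=value` entry ends up as a generated `#define` that C++ code queries with `BUILDFLAG(NAME)` after including the generated header. A minimal GN sketch with a hypothetical argument and target:

import("//build/buildflag_header.gni")

declare_args() {
  # Hypothetical build argument gating an optional feature.
  use_my_feature = false
}

buildflag_header("my_buildflags") {
  # Generates my_buildflags.h; consumed as BUILDFLAG(USE_MY_FEATURE).
  header = "my_buildflags.h"
  flags = [ "USE_MY_FEATURE=$use_my_feature" ]
}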

View File

@@ -1,7 +1,7 @@
This document describes how malloc / new calls are routed in the various Chrome
platforms.
Bear in mind that the chromium codebase does not always just use `malloc()`.
Bare in mind that the chromium codebase does not always just use `malloc()`.
Some examples:
- Large parts of the renderer (Blink) use two home-brewed allocators,
PartitionAlloc and BlinkGC (Oilpan).
@@ -15,13 +15,29 @@ Background
----------
The `allocator` target defines at compile-time the platform-specific choice of
the allocator and extra-hooks which services calls to malloc/new. The relevant
build-time flags involved are `use_allocator_shim` and
`use_partition_alloc_as_malloc`.
build-time flags involved are `use_allocator` and `use_allocator_shim`.
By default, these are true on all platforms except iOS (not yet supported) and
NaCl (no plan to support).
Furthermore, when building with a sanitizer (e.g. `asan`, `msan`, ...) both the
allocator and the shim layer are disabled.
The default choices are as follows:
**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.
**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
In addition, when building for `asan` / `msan` both the allocator and the shim
layer are disabled.
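
To make the flag combinations concrete, a minimal hypothetical `args.gn` under the older naming this revision's README describes (the newer naming replaces `use_allocator` with `use_partition_alloc_as_malloc`); values are illustrative only:

# args.gn sketch (hypothetical values; defaults differ per platform, see above)
use_allocator = "partition"   # or "none"; "winheap" is the Windows default
use_allocator_shim = true     # the malloc/new wrapper layer described above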
Layering and build deps
@@ -70,7 +86,7 @@ a central place.
- Full documentation: [Allocator shim design doc][url-allocator-shim].
- Current state: Available and enabled by default on Android, CrOS, Linux,
Mac OS and Windows.
- Tracking bug: [crbug.com/550886](https://crbug.com/550886).
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
- Build-time flag: `use_allocator_shim`.
**Overview of the unified allocator shim**

View File

@@ -15,11 +15,29 @@ declare_args() {
# Causes all the allocations to be routed via allocator_shim.cc.
use_allocator_shim = use_allocator_shim_default
# Whether PartitionAlloc should be available for use or not.
# true makes PartitionAlloc linked to the executable or shared library and
# makes it available for use. It doesn't mean that the default allocator
# is PartitionAlloc, which is governed by |use_allocator|.
#
# This flag is currently set to false only on Cronet bots, because Cronet
# doesn't use PartitionAlloc at all, and doesn't wish to incur the library
# size increase (crbug.com/674570).
use_partition_alloc = true
# RAW_PTR_EXCLUSION macro is disabled on official builds because it increased
# binary size. This flag can be used to enable it for official builds too.
force_enable_raw_ptr_exclusion = false
}
if (!use_partition_alloc && use_allocator == "partition") {
# If there is a conflict, prioritize |use_partition_alloc| over
# |use_allocator|.
use_allocator = "none"
}
assert(use_allocator == "none" || use_allocator == "partition")
assert(
!use_allocator_shim || is_linux || is_chromeos || is_android || is_win ||
is_fuchsia || is_apple,
@@ -32,38 +50,27 @@ if (is_win && use_allocator_shim) {
"The allocator shim doesn't work for the component build on Windows.")
}
# |use_backup_ref_ptr| and |use_mte_checked_ptr|, if true, compile
# BackupRefPtrImpl and MTECheckedPtrImp, respectively, as the implementation of
# raw_ptr<T>. They're mutually exclusive.
#
# To enable them, we need:
# - PartitionAlloc to be compiled in to begin with,
# - BackupRefPtr/MTECheckedPtr support to be compiled into PartitionAlloc,
# - PartitionAlloc-Everywhere to be enabled(*),
# - not be built for NaCl, as it doesn't use PartitionAlloc-Everywhere (but is
# somehow part of the build process with all other non-NaCl goods); moreover,
# MTECheckedPtr works only with 64-bit pointers, which NaCl doesn't have.
#
# Note that making BackupRefPtrImpl the implementation of raw_ptr<T> doesn't
# necessarily enable BackupRefPtr (BRP) protection. One has to additionally
# enable PartitionAllocBackupRefPtr feature flag.
#
# (*) In theory, BRP/MTECheckedPtr will work just fine without
# PartitionAlloc-Everywhere, but their scope would be limited to partitions
# that are invoked explicitly (not via malloc). These are only Blink
# partition, where we currently don't even use raw_ptr<T>.
if (!use_partition_alloc_as_malloc) {
# Chromium-specific asserts. External embedders _may_ elect to use
# specific `raw_ptr<T>` implementations even without PA-E.
assert(!enable_backup_ref_ptr_support,
"Chromium does not use BRP without PA-E")
assert(!enable_mte_checked_ptr_support,
"Chromium does not use MTECheckedPtr without PA-E")
declare_args() {
# If we use PartitionAlloc as default allocator and enable its MTECheckedPtr
# support as default, we can use_mte_checked_ptr=true as default.
use_mte_checked_ptr = enable_mte_checked_ptr_support_default &&
use_partition_alloc && use_allocator == "partition"
}
use_backup_ref_ptr = use_partition_alloc && enable_backup_ref_ptr_support &&
use_partition_alloc_as_malloc && !is_nacl
use_mte_checked_ptr = use_partition_alloc && enable_mte_checked_ptr_support &&
use_partition_alloc_as_malloc && !is_nacl
declare_args() {
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
# of raw_ptr<T>, and enable PartitionAlloc support for it.
# We also disable BRP in the presence of MTECheckedPtr, which is almost
# never enabled.
use_backup_ref_ptr = enable_backup_ref_ptr_support_default &&
use_partition_alloc && use_allocator == "partition"
}
assert(!use_backup_ref_ptr || enable_backup_ref_ptr_support,
"BackupRefPtr needs enable_backup_ref_ptr_support.")
assert(!use_mte_checked_ptr || enable_mte_checked_ptr_support,
"MTECheckedPtr needs enable_mte_checked_ptr_support.")
assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
"MTECheckedPtr conflicts with BRP.")
@@ -75,6 +82,15 @@ declare_args() {
use_asan_backup_ref_ptr = is_asan && (is_win || is_android || is_linux)
}
# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.
# In theory, such a configuration is possible, but its scope would be limited to
# only Blink partitions, which is currently not tested. Better to trigger an
# error, than have BackupRefPtr silently disabled while believing it is enabled.
if (!is_nacl) {
assert(!use_backup_ref_ptr || use_allocator == "partition",
"Can't use BackupRefPtr without PartitionAlloc-Everywhere")
}
# BackupRefPtr and AsanBackupRefPtr are mutually exclusive variants of raw_ptr.
assert(
!use_backup_ref_ptr || !use_asan_backup_ref_ptr,

View File

@@ -36,8 +36,7 @@ template <typename CheckObserverPredicate,
void inline PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
CheckObserverPredicate check_observer) {
([](bool b) { DCHECK(b); }(check_observer(std::get<Indices>(observers))),
...);
((DCHECK(check_observer(std::get<Indices>(observers)))), ...);
}
template <typename... ObserverTypes, size_t... Indices>

View File

@@ -367,12 +367,6 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
trials.emplace("DanglingPointerDetector", "Enabled");
#else
trials.emplace("DanglingPointerDetector", "Disabled");
#endif
return trials;
}
@@ -509,7 +503,7 @@ void DanglingRawPtrReleasedCrash(uintptr_t id) {
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release;
}
ImmediateCrash();
IMMEDIATE_CRASH();
}
void ClearDanglingRawPtrBuffer() {
@@ -563,7 +557,7 @@ void UnretainedDanglingRawPtrDetectedCrash(uintptr_t id) {
LOG(ERROR) << "Detected dangling raw_ptr in unretained with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< task_trace << stack_trace;
ImmediateCrash();
IMMEDIATE_CRASH();
}
void InstallUnretainedDanglingRawPtrChecks() {

View File

@@ -5,6 +5,3 @@
# The python interpreter to use by default. On Windows, this will look
# for python3.exe and python3.bat.
script_executable = "python3"
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"

View File

@@ -40,12 +40,6 @@ if (is_fuchsia) {
}
}
if (enable_pkeys && is_debug) {
config("no_stack_protector") {
cflags = [ "-fno-stack-protector" ]
}
}
component("partition_alloc") {
sources = [
"address_pool_manager.cc",
@@ -80,7 +74,6 @@ component("partition_alloc") {
"partition_alloc.cc",
"partition_alloc.h",
"partition_alloc_base/atomic_ref_count.h",
"partition_alloc_base/augmentations/compiler_specific.h",
"partition_alloc_base/bit_cast.h",
"partition_alloc_base/bits.h",
"partition_alloc_base/check.cc",
@@ -132,8 +125,6 @@ component("partition_alloc") {
"partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/win_handle_types.h",
"partition_alloc_base/win/win_handle_types_list.inc",
"partition_alloc_base/win/windows_types.h",
"partition_alloc_check.h",
"partition_alloc_config.h",
@ -162,8 +153,6 @@ component("partition_alloc") {
"partition_tag_bitmap.h",
"partition_tag_types.h",
"partition_tls.h",
"pkey.cc",
"pkey.h",
"random.cc",
"random.h",
"reservation_offset_table.cc",
@ -357,12 +346,6 @@ component("partition_alloc") {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_speed" ]
}
# We want to be able to test pkey mode without access to the default pkey.
# This is incompatible with stack protectors since the TLS won't be pkey-tagged.
if (enable_pkeys && is_debug) {
configs += [ ":no_stack_protector" ]
}
}
buildflag_header("partition_alloc_buildflags") {
@ -376,8 +359,6 @@ buildflag_header("partition_alloc_buildflags") {
enable_backup_ref_ptr_slow_checks && _enable_backup_ref_ptr_support
_enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks && _enable_backup_ref_ptr_support
_enable_dangling_raw_ptr_perf_experiment =
enable_dangling_raw_ptr_perf_experiment && _enable_dangling_raw_ptr_checks
# MTECheckedPtr is exclusive against BRP (asserted at declaration).
# MTECheckedPtr requires 64-bit pointers (not available in NaCl).
@ -404,22 +385,18 @@ buildflag_header("partition_alloc_buildflags") {
# defines and partition alloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes.
flags = [
"ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT=$use_partition_alloc_as_malloc",
"ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT=$enable_partition_alloc_as_malloc_support",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$_enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$_enable_dangling_raw_ptr_perf_experiment",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
"ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support",
# Not to be used directly - instead use
# defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
"ENABLE_MTE_CHECKED_PTR_SUPPORT=$_enable_mte_checked_ptr_support",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$glue_core_pools",
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$_enable_shadow_metadata_for_64_bits_pointers",
# *Scan is currently only used by Chromium.

View File

@ -5,8 +5,6 @@
# PartitionAlloc is planned to be extracted into a standalone library, and
# therefore dependencies need to be strictly controlled and minimized.
gclient_gn_args_file = 'partition_allocator/build/config/gclient_args.gni'
# Only these hosts are allowed for dependencies in this DEPS file.
# This is a subset of chromium/src/DEPS's allowed_hosts.
allowed_hosts = [
@ -18,13 +16,8 @@ vars = {
}
deps = {
'partition_allocator/build':
Var('chromium_git') + '/chromium/src/build.git',
'partition_allocator/buildtools':
Var('chromium_git') + '/chromium/src/buildtools.git',
'partition_allocator/buildtools/clang_format/script':
Var('chromium_git') +
'/external/github.com/llvm/llvm-project/clang/tools/clang-format.git',
Var('chromium_git') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git',
'partition_allocator/buildtools/linux64': {
'packages': [
{
@ -55,79 +48,8 @@ deps = {
'dep_type': 'cipd',
'condition': 'host_os == "win"',
},
'partition_allocator/buildtools/third_party/libc++/trunk':
Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git',
'partition_allocator/buildtools/third_party/libc++abi/trunk':
Var('chromium_git') +
'/external/github.com/llvm/llvm-project/libcxxabi.git',
'partition_allocator/tools/clang':
Var('chromium_git') + '/chromium/src/tools/clang.git',
}
hooks = [
{
'name': 'sysroot_arm',
'pattern': '.',
'condition': 'checkout_linux and checkout_arm',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm'],
},
{
'name': 'sysroot_arm64',
'pattern': '.',
'condition': 'checkout_linux and checkout_arm64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm64'],
},
{
'name': 'sysroot_x86',
'pattern': '.',
'condition': 'checkout_linux and (checkout_x86 or checkout_x64)',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x86'],
},
{
'name': 'sysroot_mips',
'pattern': '.',
'condition': 'checkout_linux and checkout_mips',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=mips'],
},
{
'name': 'sysroot_mips64',
'pattern': '.',
'condition': 'checkout_linux and checkout_mips64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=mips64el'],
},
{
'name': 'sysroot_x64',
'pattern': '.',
'condition': 'checkout_linux and checkout_x64',
'action': [
'python3',
'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x64'],
},
{
# Update the prebuilt clang toolchain.
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
'action': ['python3', 'partition_allocator/tools/clang/scripts/update.py'],
},
]
noparent = True
include_rules = [

View File

@ -17,11 +17,10 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_PKEYS)
#if BUILDFLAG(IS_APPLE)
#include <sys/mman.h>
#endif
@ -51,7 +50,7 @@ void DecommitPages(uintptr_t address, size_t size) {
void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) {
PA_DCHECK(!(ptr & kSuperPageOffsetMask));
PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
PA_CHECK(handle > 0 && handle <= std::size(aligned_pools_.pools_));
PA_CHECK(handle > 0 && handle <= std::size(pools_));
Pool* pool = GetPool(handle);
PA_CHECK(!pool->IsInitialized());
@ -77,8 +76,8 @@ uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i)
aligned_pools_.pools_[i].Reset();
for (pool_handle i = 0; i < std::size(pools_); ++i)
pools_[i].Reset();
}
void AddressPoolManager::Remove(pool_handle handle) {
@ -293,9 +292,6 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
if (IsConfigurablePoolAvailable()) {
GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
}
#if BUILDFLAG(ENABLE_PKEYS)
GetPoolStats(kPkeyPoolHandle, &stats->pkey_pool_stats);
#endif
return true;
}
@ -345,11 +341,9 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address,
size_t length) {
PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
uintptr_t address =
AllocPages(requested_address, length, kSuperPageSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
uintptr_t address = AllocPages(requested_address, length, kSuperPageSize,
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc);
return address;
}

View File

@ -35,17 +35,17 @@ namespace partition_alloc::internal {
// AddressPoolManager takes a reserved virtual address space and manages address
// space allocation.
//
// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a
// AddressPoolManager (currently) supports up to 3 pools. Each pool manages a
// contiguous reserved address space. Alloc() takes a pool_handle and returns
// address regions from the specified pool. Free() also takes a pool_handle and
// returns the address region back to the manager.
//
// (32bit version)
// AddressPoolManager wraps AllocPages and FreePages and remembers allocated
// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the bitmaps
// to judge whether a given address is in a pool that supports BackupRefPtr or
// in a pool that doesn't. All PartitionAlloc allocations must be in either of
// the pools.
// address regions using bitmaps. IsManagedByPartitionAllocBRPPool and
// IsManagedByPartitionAllocRegularPool use the bitmaps to judge whether a given
// address is in a pool that supports BackupRefPtr or in a pool that doesn't.
// All PartitionAlloc allocations must be in either of the pools.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
public:
static AddressPoolManager& GetInstance();
@ -93,11 +93,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
private:
friend class AddressPoolManagerForTesting;
#if BUILDFLAG(ENABLE_PKEYS)
// If we use a pkey pool, we need to tag its metadata with the pkey. Allow the
// function to get access to the pool pointer.
friend void TagGlobalsWithPkey(int pkey);
#endif
constexpr AddressPoolManager() = default;
~AddressPoolManager() = default;
@ -152,21 +147,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
return &aligned_pools_.pools_[handle - 1];
return &pools_[handle - 1];
}
// Gets the stats for the pool identified by `handle`, if
// initialized.
void GetPoolStats(pool_handle handle, PoolStats* stats);
  // If pkey support is enabled, we need to pkey-tag the pkey pool (which needs
  // to be last). For this, we need to add padding in front of the pools so that
  // the pkey one starts on a page boundary.
struct {
char pad_[PA_PKEY_ARRAY_PAD_SZ(Pool, kNumPools)] = {};
Pool pools_[kNumPools];
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {};
} aligned_pools_ PA_PKEY_ALIGN;
Pool pools_[kNumPools];
#endif // defined(PA_HAS_64_BITS_POINTERS)

View File

@ -38,9 +38,6 @@ struct AddressSpaceStats {
size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // defined(PA_HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS)
PoolStats pkey_pool_stats;
#endif
};
// Interface passed to `AddressPoolManager::DumpStats()` to mediate

View File

@ -48,20 +48,33 @@ PartitionAlloc-Everywhere and must be `true` as a prerequisite for
enabling PA-E.
***
### `use_partition_alloc_as_malloc`
### `use_allocator`
Does nothing special when value is `false`. Enables
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is `true`.
Does nothing special when value is `"none"`. Enables
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is
`"partition"`.
*** note
* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
includes Blink where PartitionAlloc originated, setting
`use_partition_alloc_as_malloc = false` does not disable PA usage in Blink,
which invokes PA explicitly (not via malloc).
* `use_partition_alloc_as_malloc = true` must not be confused
`use_allocator = "none"` does not disable PA usage in Blink.
* `use_allocator = "partition"` internally sets
`use_partition_alloc_as_malloc = true`, which must not be confused
with `use_partition_alloc` (see above).
***
### `use_backup_ref_ptr`
Specifies `BackupRefPtr` as the implementation for `base::raw_ptr<T>`
when `true`. See the [MiraclePtr documentation][miracleptr-doc].
*** aside
BRP requires support from PartitionAlloc, so `use_backup_ref_ptr` also
compiles the relevant code into PA. However, this arg does _not_ govern
whether or not BRP is actually enabled at runtime; that functionality
is controlled by a Finch flag.
***
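For intuition, here is a heavily simplified, self-contained toy of the BackupRefPtr idea. None of these names are Chromium's; real BRP keeps its ref-count in PartitionAlloc slot metadata and is wired into `raw_ptr<T>`. The point it illustrates: freeing while guarded pointers still exist quarantines the slot instead of letting it be reused.

```cpp
#include <cassert>
#include <cstdio>

// Toy slot: a ref-count of outstanding guarded pointers plus a freed bit.
struct Slot {
  int brp_ref_count = 0;
  bool freed = false;
  int value = 0;
};

// Toy stand-in for a BRP-backed raw_ptr<T>.
class GuardedPtr {
 public:
  explicit GuardedPtr(Slot* s) : slot_(s) { ++slot_->brp_ref_count; }
  ~GuardedPtr() {
    if (--slot_->brp_ref_count == 0 && slot_->freed)
      std::puts("last guarded pointer gone; quarantined slot is reusable");
  }
  int Get() const {
    // The quarantine makes a dangling dereference detectable.
    assert(!slot_->freed && "use-after-free caught");
    return slot_->value;
  }

 private:
  Slot* slot_;
};

void Free(Slot* s) { s->freed = true; }  // quarantine rather than reuse

int main() {
  Slot slot;
  slot.value = 42;
  {
    GuardedPtr p(&slot);
    std::printf("before free: %d\n", p.Get());
    Free(&slot);  // slot stays quarantined while p exists
    // p.Get() here would trip the assert instead of touching reused memory.
  }
}
```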
## Note: Component Builds
When working on PartitionAlloc, know that `is_debug` defaults to

View File

@ -87,11 +87,4 @@ void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}
ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCache* thread_cache = ThreadCache::Get();
if (ThreadCache::IsValid(thread_cache))
return thread_cache->thread_alloc_stats();
return {};
}
} // namespace partition_alloc::internal

View File

@ -6,7 +6,6 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/thread_cache.h"
namespace partition_alloc::internal {
@ -20,10 +19,6 @@ void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread();
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

View File

@ -25,24 +25,6 @@ PartitionAlloc's build will expect them at
In addition, something must provide `build_with_chromium = false` to
the PA build system.
## `use_partition_alloc`
The `use_partition_alloc` GN arg, described in
[`build_config.md`](./build_config.md), provides a GN-level seam that
embedders
1. can set in their GN args and
2. should observe in their GN recipes to conditionally pull in
PartitionAlloc.
I.e., if you have any reason to disable PartitionAlloc, you should do so
with this GN arg. Avoid pulling in PartitionAlloc headers when the
corresponding buildflag is false.
Setting `use_partition_alloc` false will also implicitly disable other
features, e.g. nixing the compilation of BackupRefPtr as the
implementation of `raw_ptr<T>`.
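A self-contained sketch of the guard pattern this implies on the embedder's C++ side follows. The macro here is a stand-in defined inline; in a real embedder it would come from a GN-generated buildflag header, so treat all names and paths as illustrative:

```cpp
#include <cstddef>
#include <cstdlib>

#define TOY_USE_PARTITION_ALLOC 0  // in reality, driven by the GN arg

#if TOY_USE_PARTITION_ALLOC
// Only here may PartitionAlloc headers be pulled in.
#include "base/allocator/partition_allocator/partition_alloc.h"
#endif

void* AllocateScratch(std::size_t size) {
#if TOY_USE_PARTITION_ALLOC
  // ... allocate from an explicitly created PA partition ...
#else
  return std::malloc(size);  // PA wholly disabled: fall back to malloc
#endif
}

int main() {
  void* p = AllocateScratch(64);
  std::free(p);
}
```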
## Periodic Memory Reduction Routines
PartitionAlloc provides APIs to

View File

@ -101,8 +101,6 @@ Buckets consist of slot spans, organized as linked lists (see below).
primary user (the [V8 Sandbox][v8-sandbox]) can configure it at runtime,
providing a pre-existing mapping. Its allocations aren't protected by
BackupRefPtr.
* [64-bit only] The pkey pool returns memory tagged with a memory
protection key on supported platforms. Its primary user is [V8 CFI][v8-cfi].
*** promo
Pools are downgraded into a logical concept in 32-bit environments,
@ -175,4 +173,3 @@ As of 2022, PartitionAlloc-Everywhere is supported on
[max-bucket-comment]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_constants.h;l=345;drc=667e6b001f438521e1c1a1bc3eabeead7aaa1f37
[pa-thread-cache]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/thread_cache.h
[v8-sandbox]: https://docs.google.com/document/d/1FM4fQmIhEqPG8uGp5o9A-mnPB5BOeScZYpkHjo0KKA8/preview#
[v8-cfi]: https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/preview#

View File

@ -350,9 +350,7 @@ bool ReserveAddressSpace(size_t size) {
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address) {
uintptr_t mem = internal::SystemAllocPages(
0, size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
0, size, PageAccessibilityConfiguration::kInaccessible,
PageTag::kChromium);
if (mem) {
// We guarantee this alignment when reserving address space.

View File

@ -34,12 +34,12 @@ struct PageAccessibilityConfiguration {
};
#if BUILDFLAG(ENABLE_PKEYS)
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {}
#else
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS)

View File

@ -187,8 +187,7 @@ void DecommitSystemPagesInternal(
if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
PageAccessibilityConfiguration::kInaccessible);
}
// TODO(https://crbug.com/1022062): Review whether this implementation is
@ -199,8 +198,7 @@ void DecommitSystemPagesInternal(
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
PageAccessibilityConfiguration::kInaccessible);
// TODO(https://crbug.com/1022062): this implementation will likely no longer
// be appropriate once DiscardSystemPagesInternal() migrates to a "lazy"

View File

@ -17,7 +17,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
@ -201,14 +200,8 @@ bool TrySetSystemPagesAccessInternal(
uintptr_t address,
size_t length,
PageAccessibilityConfiguration accessibility) {
#if BUILDFLAG(ENABLE_PKEYS)
return 0 == PkeyMprotectIfEnabled(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility),
accessibility.pkey);
#else
return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility)));
#endif
}
void SetSystemPagesAccessInternal(
@ -216,14 +209,8 @@ void SetSystemPagesAccessInternal(
size_t length,
PageAccessibilityConfiguration accessibility) {
int access_flags = GetAccessFlags(accessibility);
#if BUILDFLAG(ENABLE_PKEYS)
int ret =
PkeyMprotectIfEnabled(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility), accessibility.pkey);
#else
int ret = PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility)));
#endif
const int ret = PA_HANDLE_EINTR(
mprotect(reinterpret_cast<void*>(address), length, access_flags));
// On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
// kernel data structures cannot be allocated, (2) the address range is
@ -306,8 +293,7 @@ void DecommitSystemPagesInternal(
// crbug.com/1153021).
if (change_permissions) {
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
PageAccessibilityConfiguration::kInaccessible);
}
}

View File

@ -134,8 +134,7 @@ void DecommitSystemPagesInternal(
// Ignore accessibility_disposition, because decommitting is equivalent to
// making pages inaccessible.
SetSystemPagesAccess(address, length,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible));
PageAccessibilityConfiguration::kInaccessible);
}
void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {

View File

@ -18,7 +18,6 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_IOS)
@ -29,7 +28,7 @@
#include <windows.h>
#endif // BUILDFLAG(IS_WIN)
#if defined(PA_ENABLE_SHADOW_METADATA) || BUILDFLAG(ENABLE_PKEYS)
#if defined(PA_ENABLE_SHADOW_METADATA)
#include <sys/mman.h>
#endif
@ -103,11 +102,7 @@ PA_NOINLINE void HandlePoolAllocFailure() {
} // namespace
#if BUILDFLAG(ENABLE_PKEYS)
alignas(PA_PKEY_ALIGN_SZ)
#else
alignas(kPartitionCachelineSize)
#endif
PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;
#if defined(PA_ENABLE_SHADOW_METADATA)
@ -173,30 +168,6 @@ void PartitionAddressSpace::Init() {
return;
size_t regular_pool_size = RegularPoolSize();
size_t brp_pool_size = BRPPoolSize();
#if defined(PA_GLUE_CORE_POOLS)
// Gluing core pools (regular & BRP) makes sense only when both pools are of
// the same size. This is the only way we can check belonging to either of the
// two with a single bitmask operation.
PA_CHECK(regular_pool_size == brp_pool_size);
// TODO(crbug.com/1362969): Support PA_ENABLE_SHADOW_METADATA.
int pools_fd = -1;
size_t glued_pool_sizes = regular_pool_size * 2;
// Note: the BRP pool must be preceded by a "forbidden zone", which is
// conveniently provided by the last guard page of the regular pool.
setup_.regular_pool_base_address_ =
AllocPages(glued_pool_sizes, glued_pool_sizes,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, pools_fd);
if (!setup_.regular_pool_base_address_)
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ =
setup_.regular_pool_base_address_ + regular_pool_size;
#else // defined(PA_GLUE_CORE_POOLS)
#if defined(PA_ENABLE_SHADOW_METADATA)
int regular_pool_fd = memfd_create("/regular_pool", MFD_CLOEXEC);
#else
@ -204,14 +175,24 @@ void PartitionAddressSpace::Init() {
#endif
setup_.regular_pool_base_address_ =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc, regular_pool_fd);
if (!setup_.regular_pool_base_address_)
HandlePoolAllocFailure();
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
#endif
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
AddressPoolManager::GetInstance().Add(
kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
regular_pool_size - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
size_t brp_pool_size = BRPPoolSize();
#if defined(PA_ENABLE_SHADOW_METADATA)
int brp_pool_fd = memfd_create("/brp_pool", MFD_CLOEXEC);
#else
@ -225,61 +206,21 @@ void PartitionAddressSpace::Init() {
uintptr_t base_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, brp_pool_fd);
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
brp_pool_fd);
if (!base_address)
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#endif // defined(PA_GLUE_CORE_POOLS)
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
#if defined(PA_GLUE_CORE_POOLS)
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool twice as big. Adjust
// the mask appropriately.
setup_.core_pools_base_mask_ = setup_.regular_pool_base_mask_ << 1;
PA_DCHECK(setup_.core_pools_base_mask_ == (setup_.brp_pool_base_mask_ << 1));
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
AddressPoolManager::GetInstance().Add(
kRegularPoolHandle, setup_.regular_pool_base_address_, regular_pool_size);
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
AddressPoolManager::GetInstance().Add(
kBRPPoolHandle, setup_.brp_pool_base_address_, brp_pool_size);
// Sanity check pool alignment.
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
#if defined(PA_GLUE_CORE_POOLS)
PA_DCHECK(!(setup_.regular_pool_base_address_ & (glued_pool_sizes - 1)));
#endif
// Sanity check pool belonging.
PA_DCHECK(!IsInRegularPool(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_));
PA_DCHECK(IsInRegularPool(setup_.regular_pool_base_address_ +
regular_pool_size - 1));
PA_DCHECK(
!IsInRegularPool(setup_.regular_pool_base_address_ + regular_pool_size));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_));
PA_DCHECK(IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInBRPPool(setup_.brp_pool_base_address_ + brp_pool_size));
#if defined(PA_GLUE_CORE_POOLS)
PA_DCHECK(!IsInCorePools(setup_.regular_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.regular_pool_base_address_));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size - 1));
PA_DCHECK(
IsInCorePools(setup_.regular_pool_base_address_ + regular_pool_size));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ - 1));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_));
PA_DCHECK(IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size - 1));
PA_DCHECK(!IsInCorePools(setup_.brp_pool_base_address_ + brp_pool_size));
#endif // defined(PA_GLUE_CORE_POOLS)
#if PA_STARSCAN_USE_CARD_TABLE
// Reserve memory for PCScan quarantine card table.
@ -295,8 +236,7 @@ void PartitionAddressSpace::Init() {
// Reserve memory for the shadow pools.
uintptr_t regular_pool_shadow_address =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc, regular_pool_fd);
regular_pool_shadow_offset_ =
regular_pool_shadow_address - setup_.regular_pool_base_address_;
@ -304,9 +244,8 @@ void PartitionAddressSpace::Init() {
uintptr_t brp_pool_shadow_address = AllocPagesWithAlignOffset(
0, brp_pool_size + kForbiddenZoneSize, brp_pool_size,
brp_pool_size - kForbiddenZoneSize,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc, brp_pool_fd);
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
brp_pool_fd);
brp_pool_shadow_offset_ =
brp_pool_shadow_address - setup_.brp_pool_base_address_;
#endif
@ -317,13 +256,8 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
// The ConfigurablePool must only be initialized once.
PA_CHECK(!IsConfigurablePoolInitialized());
#if BUILDFLAG(ENABLE_PKEYS)
// It's possible that the pkey pool has been initialized first, in which case
// the setup_ memory has been made read-only. Remove the protection
// temporarily.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(kDefaultPkey);
#endif
// The other Pools must be initialized first.
Init();
PA_CHECK(pool_base);
PA_CHECK(size <= kConfigurablePoolMaxSize);
@ -336,61 +270,15 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
AddressPoolManager::GetInstance().Add(
kConfigurablePoolHandle, setup_.configurable_pool_base_address_, size);
#if BUILDFLAG(ENABLE_PKEYS)
// Put the pkey protection back in place.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(setup_.pkey_);
#endif
}
#if BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::InitPkeyPool(int pkey) {
// The PkeyPool can't be initialized with conflicting pkeys.
if (IsPkeyPoolInitialized()) {
PA_CHECK(setup_.pkey_ == pkey);
return;
}
size_t pool_size = PkeyPoolSize();
setup_.pkey_pool_base_address_ =
AllocPages(pool_size, pool_size,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
if (!setup_.pkey_pool_base_address_)
HandlePoolAllocFailure();
PA_DCHECK(!(setup_.pkey_pool_base_address_ & (pool_size - 1)));
setup_.pkey_ = pkey;
AddressPoolManager::GetInstance().Add(
kPkeyPoolHandle, setup_.pkey_pool_base_address_, pool_size);
PA_DCHECK(!IsInPkeyPool(setup_.pkey_pool_base_address_ - 1));
PA_DCHECK(IsInPkeyPool(setup_.pkey_pool_base_address_));
PA_DCHECK(IsInPkeyPool(setup_.pkey_pool_base_address_ + pool_size - 1));
PA_DCHECK(!IsInPkeyPool(setup_.pkey_pool_base_address_ + pool_size));
// TODO(1362969): support PA_ENABLE_SHADOW_METADATA
}
#endif // BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::UninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
UninitPkeyPoolForTesting(); // IN-TEST
#endif
#if defined(PA_GLUE_CORE_POOLS)
// The core pools (regular & BRP) were allocated using a single allocation of
// double size.
FreePages(setup_.regular_pool_base_address_, 2 * RegularPoolSize());
#else // defined(PA_GLUE_CORE_POOLS)
FreePages(setup_.regular_pool_base_address_, RegularPoolSize());
// For BRP pool, the allocation region includes a "forbidden zone" before the
// pool.
const size_t kForbiddenZoneSize = PageAllocationGranularity();
FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
BRPPoolSize() + kForbiddenZoneSize);
#endif // defined(PA_GLUE_CORE_POOLS)
// Do not free pages for the configurable pool, because its memory is owned
// by someone else, but deinitialize it nonetheless.
setup_.regular_pool_base_address_ = kUninitializedPoolBaseAddress;
@ -401,37 +289,11 @@ void PartitionAddressSpace::UninitForTesting() {
}
void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
// It's possible that the pkey pool has been initialized first, in which case
// the setup_ memory has been made read-only. Remove the protection
// temporarily.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(kDefaultPkey);
#endif
AddressPoolManager::GetInstance().Remove(kConfigurablePoolHandle);
setup_.configurable_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.configurable_pool_base_mask_ = 0;
#if BUILDFLAG(ENABLE_PKEYS)
// Put the pkey protection back in place.
if (IsPkeyPoolInitialized())
TagGlobalsWithPkey(setup_.pkey_);
#endif
}
#if BUILDFLAG(ENABLE_PKEYS)
void PartitionAddressSpace::UninitPkeyPoolForTesting() {
if (IsPkeyPoolInitialized()) {
TagGlobalsWithPkey(kDefaultPkey);
PkeySettings::settings.enabled = false;
FreePages(setup_.pkey_pool_base_address_, PkeyPoolSize());
AddressPoolManager::GetInstance().Remove(kPkeyPoolHandle);
setup_.pkey_pool_base_address_ = kUninitializedPoolBaseAddress;
setup_.pkey_ = kInvalidPkey;
}
}
#endif
#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
PageCharacteristics page_characteristics;

View File

@ -21,7 +21,6 @@
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -66,11 +65,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
PA_DCHECK(IsConfigurablePoolInitialized());
pool = kConfigurablePoolHandle;
base = setup_.configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
} else if (IsInPkeyPool(address)) {
pool = kPkeyPoolHandle;
base = setup_.pkey_pool_base_address_;
#endif
} else {
PA_NOTREACHED();
}
@ -93,16 +87,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
//
// This function must only be called from the main thread.
static void InitConfigurablePool(uintptr_t pool_base, size_t size);
#if BUILDFLAG(ENABLE_PKEYS)
static void InitPkeyPool(int pkey);
static void UninitPkeyPoolForTesting();
#endif
static void UninitForTesting();
static void UninitConfigurablePoolForTesting();
static PA_ALWAYS_INLINE bool IsInitialized() {
// Either neither or both regular and BRP pool are initialized. The
// configurable and pkey pool are initialized separately.
// configurable pool is initialized separately.
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
PA_DCHECK(setup_.brp_pool_base_address_ != kUninitializedPoolBaseAddress);
return true;
@ -117,12 +107,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
kUninitializedPoolBaseAddress;
}
#if BUILDFLAG(ENABLE_PKEYS)
static PA_ALWAYS_INLINE bool IsPkeyPoolInitialized() {
return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
}
#endif
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
@ -148,25 +132,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
}
#if defined(PA_GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInCorePools(uintptr_t address) {
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else
// When PA_GLUE_CORE_POOLS is on, the BRP pool is placed at the end of the
// regular pool, effectively forming one virtual pool twice as big. Adjust
// the mask appropriately.
constexpr uintptr_t core_pools_base_mask = kRegularPoolBaseMask << 1;
#endif
bool ret =
(address & core_pools_base_mask) == setup_.regular_pool_base_address_;
PA_DCHECK(ret == (IsInRegularPool(address) || IsInBRPPool(address)));
return ret;
}
#endif // defined(PA_GLUE_CORE_POOLS)
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
@ -182,13 +147,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return setup_.configurable_pool_base_address_;
}
#if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr.
static PA_ALWAYS_INLINE bool IsInPkeyPool(uintptr_t address) {
return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
}
#endif
#if defined(PA_ENABLE_SHADOW_METADATA)
static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
if (pool == kRegularPoolHandle) {
@ -225,12 +183,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
}
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS)
constexpr static PA_ALWAYS_INLINE size_t PkeyPoolSize() {
return kPkeyPoolSize;
}
#endif
// On 64-bit systems, PA allocates from several contiguous, mutually disjoint
// pools. The BRP pool is where all allocations have a BRP ref-count, so
// pointers into it can use BRP protection against UaF. Allocations
@ -252,12 +204,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
// ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSize));
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr size_t kPkeyPoolSize = kGiB / 4;
static_assert(base::bits::IsPowerOfTwo(kPkeyPoolSize));
#endif
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
base::bits::IsPowerOfTwo(kBRPPoolSize));
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
// We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
// crbug.com/1101421 and crbug.com/1217759).
@ -265,14 +213,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr size_t kBRPPoolSizeForLegacyWindows = 4 * kGiB;
static_assert(kRegularPoolSizeForLegacyWindows < kRegularPoolSize);
static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows) &&
base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize));
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize) &&
base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));
#if BUILDFLAG(IS_IOS)
@ -287,8 +235,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess));
static_assert(base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess) &&
base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif  // BUILDFLAG(IS_IOS)
#if !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
@ -301,12 +249,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif // !defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr uintptr_t kPkeyPoolOffsetMask =
static_cast<uintptr_t>(kPkeyPoolSize) - 1;
static constexpr uintptr_t kPkeyPoolBaseMask = ~kPkeyPoolOffsetMask;
#endif
// This must be set to such a value that IsIn*Pool() always returns false when
// the pool isn't initialized.
static constexpr uintptr_t kUninitializedPoolBaseAddress =
@ -320,22 +262,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
: regular_pool_base_address_(kUninitializedPoolBaseAddress),
brp_pool_base_address_(kUninitializedPoolBaseAddress),
configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if BUILDFLAG(ENABLE_PKEYS)
pkey_pool_base_address_(kUninitializedPoolBaseAddress),
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
regular_pool_base_mask_(0),
brp_pool_base_mask_(0),
#if defined(PA_GLUE_CORE_POOLS)
core_pools_base_mask_(0),
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
configurable_pool_base_mask_(0)
#if BUILDFLAG(ENABLE_PKEYS)
,
pkey_(kInvalidPkey)
#endif
{
configurable_pool_base_mask_(0) {
}
// Using a union to enforce padding.
@ -344,62 +275,30 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
uintptr_t regular_pool_base_address_;
uintptr_t brp_pool_base_address_;
uintptr_t configurable_pool_base_address_;
#if BUILDFLAG(ENABLE_PKEYS)
uintptr_t pkey_pool_base_address_;
#endif
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t regular_pool_base_mask_;
uintptr_t brp_pool_base_mask_;
#if defined(PA_GLUE_CORE_POOLS)
uintptr_t core_pools_base_mask_;
#endif
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
uintptr_t configurable_pool_base_mask_;
#if BUILDFLAG(ENABLE_PKEYS)
int pkey_;
#endif
};
#if BUILDFLAG(ENABLE_PKEYS)
// With pkey support, we want to be able to pkey-tag all global metadata
// which requires page granularity.
char one_page_[SystemPageSize()];
#else
char one_cacheline_[kPartitionCachelineSize];
#endif
};
};
#if BUILDFLAG(ENABLE_PKEYS)
static_assert(sizeof(PoolSetup) % SystemPageSize() == 0,
"PoolSetup has to fill a page(s)");
#else
static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
"PoolSetup has to fill a cacheline(s)");
#endif
// See the comment describing the address layout above.
//
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writeable data, through
// alignment and padding.
#if BUILDFLAG(ENABLE_PKEYS)
static_assert(PA_PKEY_ALIGN_SZ >= kPartitionCachelineSize);
alignas(PA_PKEY_ALIGN_SZ)
#else
alignas(kPartitionCachelineSize)
#endif
static PoolSetup setup_ PA_CONSTINIT;
alignas(kPartitionCachelineSize) static PoolSetup setup_;
#if defined(PA_ENABLE_SHADOW_METADATA)
static std::ptrdiff_t regular_pool_shadow_offset_;
static std::ptrdiff_t brp_pool_shadow_offset_;
#endif
#if BUILDFLAG(ENABLE_PKEYS)
// If we use a pkey pool, we need to tag its metadata with the pkey. Allow the
// function to get access to the PoolSetup.
friend void TagGlobalsWithPkey(int pkey);
#endif
};
PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
@ -432,9 +331,6 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
return internal::PartitionAddressSpace::IsInRegularPool(address)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|| internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
#if BUILDFLAG(ENABLE_PKEYS)
|| internal::PartitionAddressSpace::IsInPkeyPool(address)
#endif
|| internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
@ -449,27 +345,12 @@ PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInBRPPool(address);
}
#if defined(PA_GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocCorePools(uintptr_t address) {
return internal::PartitionAddressSpace::IsInCorePools(address);
}
#endif // defined(PA_GLUE_CORE_POOLS)
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
uintptr_t address) {
return internal::PartitionAddressSpace::IsInConfigurablePool(address);
}
#if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocPkeyPool(uintptr_t address) {
return internal::PartitionAddressSpace::IsInPkeyPool(address);
}
#endif
PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
}

View File

@ -11,7 +11,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -57,10 +56,6 @@ PA_ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// of uninitialized / freed memory, and makes tests run significantly
// faster. Note that for direct-mapped allocations, memory is decommitted at
// free() time, so use of freed memory cannot happen.
#if BUILDFLAG(ENABLE_PKEYS)
LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}

View File

@ -102,9 +102,6 @@ void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
}
void PartitionAllocGlobalUninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
internal::PartitionAddressSpace::UninitPkeyPoolForTesting();
#endif
#if BUILDFLAG(STARSCAN)
internal::PCScan::UninitForTesting(); // IN-TEST
#endif // BUILDFLAG(STARSCAN)

View File

@ -12,59 +12,41 @@ if (use_partition_alloc_as_malloc_default) {
}
declare_args() {
# Whether PartitionAlloc should be available for use or not.
# true links PartitionAlloc into the executable or shared library and
# makes it available for use. It doesn't mean that the default allocator
# is PartitionAlloc; that is governed by |use_partition_alloc_as_malloc|.
#
# N.B. generally, embedders should look at this GN arg and at the
# corresponding buildflag to determine whether to interact with PA
# source at all (pulling the component in via GN, including headers,
# etc.). There is nothing stopping a lazy embedder from ignoring this
# and unconditionally using PA, but such a setup is inadvisable.
#
# This flag is currently set to false only on Cronet bots, because Cronet
# doesn't use PartitionAlloc at all, and doesn't wish to incur the library
# size increase (crbug.com/674570).
use_partition_alloc = true
}
declare_args() {
# PartitionAlloc-Everywhere (PA-E).
use_partition_alloc_as_malloc =
use_partition_alloc && use_partition_alloc_as_malloc_default
}
if (!use_partition_alloc) {
assert(
!use_partition_alloc_as_malloc,
"Can't use PartitionAlloc-Everywhere when PartitionAlloc is wholly disabled")
# Temporarily move |use_allocator| to partition_alloc.gni, because
# some bots use |use_allocator|="none" with
# |use_partition_alloc_as_malloc_default|=true. This causes a PA_CHECK()
# failure at PartitionAllocator::init().
# TODO(1151236): Replace |use_allocator|="none" and |use_allocator|=
# "partition" with |use_partition_alloc_as_malloc|=false and
# |use_partition_alloc_as_malloc|=true, and remove |use_allocator| from
# args.gn of all trybots. //base/allocator will look at
# |use_partition_alloc_as_malloc| and will generate buildflags.
# Memory allocator to use. Set to "none" to use default allocator.
use_allocator = _default_allocator
}
declare_args() {
use_freeslot_bitmap = false
# Puts the regular and BRP pools right next to each other, so that we can
# check "belongs to one of the two pools" with a single bitmask operation.
glue_core_pools = false
}
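The `glue_core_pools` comment above describes a single-mask membership test. Here is a self-contained arithmetic sketch of that trick; the sizes and base addresses are invented for illustration only:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  constexpr uintptr_t kPoolSize = uintptr_t{1} << 30;      // 1 GiB per pool
  constexpr uintptr_t kRegularBase = uintptr_t{16} << 30;  // aligned to 2 GiB
  constexpr uintptr_t kBrpBase = kRegularBase + kPoolSize; // glued right after
  // One pool's mask widened by one bit, covering both adjacent pools.
  constexpr uintptr_t kCorePoolsMask = ~(2 * kPoolSize - 1);

  auto in_core_pools = [](uintptr_t address) {
    // Single bitmask compare answers "in either pool?".
    return (address & kCorePoolsMask) == kRegularBase;
  };

  assert(in_core_pools(kRegularBase));
  assert(in_core_pools(kBrpBase + kPoolSize - 1));
  assert(!in_core_pools(kRegularBase - 1));
  assert(!in_core_pools(kBrpBase + kPoolSize));
}
```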
declare_args() {
# Build support for Use-after-Free protection via BackupRefPtr (BRP) or
# MTECheckedPtr. To be effective, these need to be paired with raw_ptr<>.
#
  # These are effective only for memory allocated from PartitionAlloc, so it is
  # recommended to enable PA-E above, though that isn't strictly necessary.
  # Embedders can create and use PA partitions explicitly.
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
# of raw_ptr<T>, and enable PartitionAlloc support for it.
enable_backup_ref_ptr_support =
use_partition_alloc && enable_backup_ref_ptr_support_default
enable_backup_ref_ptr_support_default && use_allocator == "partition"
enable_mte_checked_ptr_support =
use_partition_alloc && enable_mte_checked_ptr_support_default
enable_mte_checked_ptr_support_default && use_allocator == "partition"
}
assert(!(enable_backup_ref_ptr_support && enable_mte_checked_ptr_support),
"MTECheckedPtrSupport conflicts with BRPSupport.")
declare_args() {
enable_partition_alloc_as_malloc_support =
use_partition_alloc_as_malloc_default && use_allocator == "partition"
}
declare_args() {
# - put_ref_count_in_previous_slot: place the ref-count at the end of the
# previous slot (or in metadata if a slot starts on the page boundary), as
@ -78,19 +60,8 @@ declare_args() {
enable_backup_ref_ptr_slow_checks =
enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support
enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enables the dangling raw_ptr checks feature for the performance experiment.
  # Not every dangling pointer has been fixed or annotated yet. To avoid
  # accounting for the cost of calling PA's embedder callbacks when a
  # dangling pointer is detected, this simulates the raw_ptr being allowed
  # to dangle.
#
# This flag is temporary, and isn't used by PA embedders, so it doesn't need
# to go through build_overrides
enable_dangling_raw_ptr_perf_experiment = false
}
declare_args() {
@ -114,12 +85,6 @@ assert(
enable_backup_ref_ptr_support || !enable_dangling_raw_ptr_checks,
"Can't enable dangling raw_ptr checks if BackupRefPtr isn't enabled at all")
# To run the dangling raw_ptr detector experiment, the underlying feature must
# be enabled too.
assert(
enable_dangling_raw_ptr_checks || !enable_dangling_raw_ptr_perf_experiment,
"Missing dangling pointer checks feature for its performance experiment")
declare_args() {
enable_pkeys = is_linux && target_cpu == "x64"
}

View File

@ -1,23 +0,0 @@
# `partition_alloc_base/`
This is a rough mirror of Chromium's `//base`, cut down to the necessary
files and contents that PartitionAlloc pulls in. Small tweaks (e.g.
macro renaming) have been made to prevent compilation issues, but we
generally prefer that this remain a mostly unmutated subset of `//base`.
## Update Policy
TBD.
* This directory may drift out of sync with `//base`.
* We will merge security updates from Chromium's `//base` once we are
made aware of them.
* We may elect to freshen files when we need to use new `//base`
functionality in PA.
## Augmentation Policy
Prefer not to modify the contents here directly. Put additions into
`augmentations/`, documenting the usage and provenance of each.

View File

@ -1,22 +0,0 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
// Extensions for PA's copy of `//base/compiler_specific.h`.
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
// PA_ATTRIBUTE_RETURNS_NONNULL
//
// Tells the compiler that a function never returns a null pointer.
// Sourced from Abseil's `attributes.h`.
#if PA_HAS_ATTRIBUTE(returns_nonnull)
#define PA_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
#else
#define PA_ATTRIBUTE_RETURNS_NONNULL
#endif
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_AUGMENTATIONS_COMPILER_SPECIFIC_H_
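A standalone sketch of how the attribute above is used; the macro is re-derived inline so the example compiles outside PA (PA_HAS_ATTRIBUTE normally comes from PA's copy of compiler_specific.h):

```cpp
#if defined(__has_attribute)
#if __has_attribute(returns_nonnull)
#define RETURNS_NONNULL __attribute__((returns_nonnull))
#endif
#endif
#ifndef RETURNS_NONNULL
#define RETURNS_NONNULL
#endif

#include <cstddef>
#include <cstdlib>

// The attribute lets the compiler assume the result is never null, so
// callers' null checks can be optimized away and contradictory code can
// be flagged at compile time.
RETURNS_NONNULL void* AllocOrAbort(std::size_t size) {
  void* p = std::malloc(size);
  if (!p)
    std::abort();  // never return null, upholding the attribute's contract
  return p;
}

int main() {
  void* p = AllocOrAbort(16);
  std::free(p);
}
```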

View File

@ -98,11 +98,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CheckError {
LogMessage* log_message_;
};
#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#error "Debug builds are not expected to be optimized as official builds."
#endif // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#if defined(OFFICIAL_BUILD) && !BUILDFLAG(PA_DCHECK_IS_ON)
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
// Discard log strings to reduce code bloat.
//

View File

@ -147,8 +147,6 @@ void CPU::Initialize() {
has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
has_pku_ = (cpu_info7[2] & 0x00000008) != 0;
}
// Get the brand string of the cpu.

View File

@ -68,13 +68,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CPU final {
constexpr bool has_bti() const { return false; }
#endif
#if defined(ARCH_CPU_X86_FAMILY)
// Memory protection key support for user-mode pages
bool has_pku() const { return has_pku_; }
#else
constexpr bool has_pku() const { return false; }
#endif
private:
// Query the processor for CPUID information.
void Initialize();
@ -97,9 +90,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) CPU final {
#if defined(ARCH_CPU_ARM_FAMILY)
bool has_mte_ = false; // Armv8.5-A MTE (Memory Tagging Extension)
bool has_bti_ = false; // Armv8.5-A BTI (Branch Target Identification)
#endif
#if defined(ARCH_CPU_X86_FAMILY)
bool has_pku_ = false;
#endif
bool has_non_stop_time_stamp_counter_ = false;
bool is_running_in_vm_ = false;

View File

@ -1,16 +0,0 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_
// Forward declare Windows compatible handles.
#define PA_WINDOWS_HANDLE_TYPE(name) \
struct name##__; \
typedef struct name##__* name;
#include "base/allocator/partition_allocator/partition_alloc_base/win/win_handle_types_list.inc"
#undef PA_WINDOWS_HANDLE_TYPE
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_WIN_WIN_HANDLE_TYPES_H_

View File

@ -1,25 +0,0 @@
// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file intentionally does not have header guards, it's included
// inside a macro to generate enum values. The following line silences a
// presubmit and Tricium warning that would otherwise be triggered by this:
// no-include-guard-because-multiply-included
// NOLINT(build/header_guard)
// This file contains the list of Windows handle types that can be recognized
// via specific pointee types declared in //base/win/windows_types.h
// (e.g. `HDC` points to a fake/forward-declared `HDC__` struct).
PA_WINDOWS_HANDLE_TYPE(HDC)
PA_WINDOWS_HANDLE_TYPE(HDESK)
PA_WINDOWS_HANDLE_TYPE(HGLRC)
PA_WINDOWS_HANDLE_TYPE(HICON)
PA_WINDOWS_HANDLE_TYPE(HINSTANCE)
PA_WINDOWS_HANDLE_TYPE(HKEY)
PA_WINDOWS_HANDLE_TYPE(HKL)
PA_WINDOWS_HANDLE_TYPE(HMENU)
PA_WINDOWS_HANDLE_TYPE(HWINSTA)
PA_WINDOWS_HANDLE_TYPE(HWND)
PA_WINDOWS_HANDLE_TYPE(HMONITOR)
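A self-contained sketch of the X-macro pattern shown in these two headers: defining PA_WINDOWS_HANDLE_TYPE and including the list stamps out one forward-declared handle type per entry. A two-entry list is inlined here in place of the real #include so the sketch compiles on its own:

```cpp
// Stamp out opaque Windows-compatible handle types without <windows.h>.
#define PA_WINDOWS_HANDLE_TYPE(name) \
  struct name##__;                   \
  typedef struct name##__* name;
PA_WINDOWS_HANDLE_TYPE(HDC)
PA_WINDOWS_HANDLE_TYPE(HWND)
#undef PA_WINDOWS_HANDLE_TYPE

// HDC and HWND are now usable as distinct opaque pointer types.
int main() {
  HDC dc = nullptr;
  HWND window = nullptr;
  (void)dc;
  (void)window;
}
```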

View File

@ -46,14 +46,6 @@ static_assert(sizeof(void*) != 8, "");
#endif // defined(PA_HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
// Puts the regular and BRP pools right next to each other, so that we can
// check "belongs to one of the two pools" with a single bitmask operation.
//
// This setting is specific to 64-bit, as 32-bit has a different implementation.
#if defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS)
#define PA_GLUE_CORE_POOLS
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#include <linux/version.h>
@ -146,19 +138,8 @@ static_assert(sizeof(void*) != 8, "");
#define PA_HAS_FREELIST_SHADOW_ENTRY
#endif
// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
defined(PA_HAS_64_BITS_POINTERS) && !defined(PA_HAS_MEMORY_TAGGING)
#define PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS
#endif // BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) &&
// defined(PA_HAS_64_BITS_POINTERS) && !defined(PA_HAS_MEMORY_TAGGING)
// Specifies whether allocation extras need to be added.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#define PA_EXTRAS_REQUIRED
#endif
@ -261,6 +242,16 @@ constexpr bool kUseLazyCommit = false;
#define PA_PREFER_SMALLER_SLOT_SPANS
#endif // BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
defined(PA_HAS_64_BITS_POINTERS) && !defined(PA_HAS_MEMORY_TAGGING)
#define PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS
#endif // BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) &&
// defined(PA_HAS_64_BITS_POINTERS) && !defined(PA_HAS_MEMORY_TAGGING)
// Enable shadow metadata.
//
// With this flag, shadow pools will be mapped, on which writable shadow

View File

@ -261,11 +261,7 @@ constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// PartitionAlloc's address space is split into pools. See `glossary.md`.
#if defined(PA_HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS)
constexpr size_t kNumPools = 4;
#else
constexpr size_t kNumPools = 3;
#endif
// Maximum pool size. With exception of Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// allows to choose a different size at initialization time for certain
@ -288,12 +284,6 @@ constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr pool_handle kPkeyPoolHandle = 4;
static_assert(
kPkeyPoolHandle == kNumPools,
"The pkey pool must come last since we pkey_mprotect its metadata.");
#endif
// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE

View File

@ -306,11 +306,9 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
RecommitSystemPages(reservation_start + SystemPageSize(),
SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kRead),
PageAccessibilityConfiguration::kRead,
#else
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityDisposition::kRequireUpdate);
}
@ -323,8 +321,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
ScopedSyscallTimer timer{root};
RecommitSystemPages(reservation_start + SystemPageSize() * 2,
SystemPageSize(),
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
@ -334,8 +331,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(reservation_start, pool),
SystemPageSize(),
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
@ -712,8 +708,7 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
#endif
SetSystemPagesAccess(root->next_tag_bitmap_page,
next_tag_bitmap_page - root->next_tag_bitmap_page,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kReadWrite));
PageAccessibilityConfiguration::kReadWrite);
root->next_tag_bitmap_page = next_tag_bitmap_page;
}
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
@ -812,11 +807,9 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
ScopedSyscallTimer timer{root};
RecommitSystemPages(super_page + SystemPageSize(), SystemPageSize(),
#if defined(PA_ENABLE_SHADOW_METADATA)
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kRead),
PageAccessibilityConfiguration::kRead,
#else
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
#endif
PageAccessibilityDisposition::kRequireUpdate);
}
@ -827,8 +820,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
if (root->ChoosePool() == kBRPPoolHandle) {
ScopedSyscallTimer timer{root};
RecommitSystemPages(super_page + SystemPageSize() * 2, SystemPageSize(),
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
@ -838,8 +830,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
ScopedSyscallTimer timer{root};
RecommitSystemPages(ShadowMetadataStart(super_page, root->ChoosePool()),
SystemPageSize(),
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif
@ -911,8 +902,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
{
ScopedSyscallTimer timer{root};
RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
PCScan::RegisterNewSuperPage(root, super_page);
@ -927,8 +917,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
ScopedSyscallTimer timer{root};
RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
root->PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
#endif

View File

@ -14,7 +14,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/spinning_mutex.h"
#include "build/build_config.h"
@ -25,10 +24,6 @@ class PA_LOCKABLE Lock {
inline constexpr Lock();
void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_PKEYS)
LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif
// When PartitionAlloc is malloc(), it can easily become reentrant. For
// instance, a DCHECK() triggers in external code (such as
// base::Lock). DCHECK() error message formatting allocates, which triggers
@ -74,9 +69,6 @@ class PA_LOCKABLE Lock {
void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_PKEYS)
LiftPkeyRestrictionsScope lift_pkey_restrictions;
#endif
PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
base::PlatformThread::CurrentRef());
#endif
@ -97,8 +89,7 @@ class PA_LOCKABLE Lock {
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Should in theory be protected by |lock_|, but we need to read it to detect
// recursive lock acquisition (and thus, the allocator becoming reentrant).
std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
base::PlatformThreadRef();
std::atomic<base::PlatformThreadRef> owning_thread_ref_{};
#endif
};
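The recursion check above relies on reading an owner field outside the lock. A minimal sketch of the same pattern in portable C++ (all names are illustrative stand-ins, not PartitionAlloc's types):

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

class ReentrancyCheckedLock {
 public:
  void Acquire() {
    std::thread::id self = std::this_thread::get_id();
    // Read before taking the lock: equality can only hold if *this* thread
    // already owns the lock, i.e. the allocator has become reentrant.
    assert(owner_.load(std::memory_order_acquire) != self);
    mu_.lock();
    owner_.store(self, std::memory_order_release);
  }
  void Release() {
    owner_.store(std::thread::id(), std::memory_order_release);
    mu_.unlock();
  }

 private:
  std::mutex mu_;
  std::atomic<std::thread::id> owner_{};
};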

View File

@ -334,17 +334,10 @@ void UnmapNow(uintptr_t reservation_start,
} else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{
PA_DCHECK(
pool == kRegularPoolHandle
#if BUILDFLAG(ENABLE_PKEYS)
|| pool == kPkeyPoolHandle
#endif
|| (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle));
PA_DCHECK(pool == kRegularPoolHandle || (IsConfigurablePoolAvailable() &&
pool == kConfigurablePoolHandle));
// Non-BRP pools don't need the adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
#if BUILDFLAG(ENABLE_PKEYS)
IsManagedByPartitionAllocPkeyPool(reservation_start) ||
#endif
IsManagedByPartitionAllocConfigurablePool(reservation_start));
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)

View File

@ -108,17 +108,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
// https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
PA_ALWAYS_INLINE void Acquire() {
CheckCookieIfSupported();
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
constexpr CountType kInc = kUnprotectedPtrInc;
constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
constexpr CountType kInc = kPtrInc;
constexpr CountType kMask = kPtrCountMask;
#endif
CountType old_count = count_.fetch_add(kInc, std::memory_order_relaxed);
CountType old_count = count_.fetch_add(kPtrInc, std::memory_order_relaxed);
// Check overflow.
PA_CHECK((old_count & kMask) != kMask);
PA_CHECK((old_count & kPtrCountMask) != kPtrCountMask);
}
// Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
@ -138,18 +130,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
// Returns true if the allocation should be reclaimed.
PA_ALWAYS_INLINE bool Release() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
constexpr CountType kInc = kUnprotectedPtrInc;
constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
constexpr CountType kInc = kPtrInc;
constexpr CountType kMask = kPtrCountMask;
#endif
CheckCookieIfSupported();
CountType old_count = count_.fetch_sub(kInc, std::memory_order_release);
CountType old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
// Check underflow.
PA_DCHECK(old_count & kMask);
PA_DCHECK(old_count & kPtrCountMask);
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
// If a dangling raw_ptr<> was detected, report it.
@ -160,7 +145,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
}
#endif
return ReleaseCommon(old_count - kInc);
return ReleaseCommon(old_count - kPtrInc);
}
// Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
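The Acquire()/Release() pair above reduces to a checked refcount. A hedged sketch with assumed constants (the real bit layout and the kPtrInc/kPtrCountMask values differ):

#include <atomic>
#include <cstdint>
#include <cstdlib>

class CheckedRefCount {
 public:
  void Acquire() {
    uint32_t old_count = count_.fetch_add(kPtrInc, std::memory_order_relaxed);
    if ((old_count & kPtrCountMask) == kPtrCountMask)
      std::abort();  // overflow: count saturated the mask
  }
  // Returns true when the last reference is gone and memory can be reclaimed.
  bool Release() {
    uint32_t old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
    if ((old_count & kPtrCountMask) == 0)
      std::abort();  // underflow: released more than acquired
    return (old_count - kPtrInc) == 0;
  }

 private:
  static constexpr uint32_t kPtrInc = 1;             // assumed increment
  static constexpr uint32_t kPtrCountMask = 0xFFFF;  // assumed mask
  std::atomic<uint32_t> count_{kPtrInc};
};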

View File

@ -24,7 +24,6 @@
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -641,16 +640,6 @@ void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address) {
}
#endif
#if BUILDFLAG(ENABLE_PKEYS)
void PartitionAllocPkeyInit(int pkey) {
PkeySettings::settings.enabled = true;
PartitionAddressSpace::InitPkeyPool(pkey);
// Call TagGlobalsWithPkey last since we might not have write permissions to
// to memory tagged with `pkey` at this point.
TagGlobalsWithPkey(pkey);
}
#endif // BUILDFLAG(ENABLE_PKEYS)
} // namespace internal
template <bool thread_safe>
@ -728,15 +717,9 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
auto* curr = first_extent;
while (curr != nullptr) {
auto* next = curr->next;
uintptr_t address = SuperPagesBeginFromExtent(curr);
size_t size =
internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
#if !defined(PA_HAS_64_BITS_POINTERS)
internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
size);
#endif
internal::AddressPoolManager::GetInstance().UnreserveAndDecommit(
pool_handle, address, size);
pool_handle, SuperPagesBeginFromExtent(curr),
internal::kSuperPageSize * curr->number_of_consecutive_super_pages);
curr = next;
}
}
@ -748,44 +731,6 @@ void PartitionRoot<thread_safe>::EnableMac11MallocSizeHackForTesting() {
}
#endif // defined(PA_ENABLE_MAC11_MALLOC_SIZE_HACK)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!defined(PA_HAS_64_BITS_POINTERS)
namespace {
std::atomic<bool> g_reserve_brp_guard_region_called;
// An address constructed by repeating `kQuarantinedByte` should never point
// to valid memory. Preemptively reserve a memory region around that address and
// make it inaccessible. Not needed for 64-bit platforms where the address is
// guaranteed to be non-canonical. Safe to call multiple times.
void ReserveBackupRefPtrGuardRegionIfNeeded() {
bool expected = false;
// No need to block execution for potential concurrent initialization, merely
// want to make sure this is only called once.
if (!g_reserve_brp_guard_region_called.compare_exchange_strong(expected,
true))
return;
size_t alignment = internal::PageAllocationGranularity();
uintptr_t requested_address;
memset(&requested_address, internal::kQuarantinedByte,
sizeof(requested_address));
requested_address = RoundDownToPageAllocationGranularity(requested_address);
// Request several pages so that even unreasonably large C++ objects stay
// within the inaccessible region. If some of the pages can't be reserved,
// it's still preferable to try and reserve the rest.
for (size_t i = 0; i < 4; ++i) {
[[maybe_unused]] uintptr_t allocated_address =
AllocPages(requested_address, alignment, alignment,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kInaccessible),
PageTag::kPartitionAlloc);
requested_address += alignment;
}
}
} // namespace
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
// !defined(PA_HAS_64_BITS_POINTERS)
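As a sketch of the address computation in the helper above: fill a pointer-sized integer with the quarantine byte and round down to the page to reserve. The 0xEF fill value and 4 KiB granularity are assumptions for illustration:

#include <cstdint>
#include <cstring>

int main() {
  constexpr unsigned char kQuarantinedByte = 0xEF;  // assumed fill value
  constexpr uintptr_t kGranularity = 4096;          // assumed granularity

  uintptr_t requested_address;
  std::memset(&requested_address, kQuarantinedByte, sizeof(requested_address));
  // On a 32-bit target this yields 0xEFEFEFEF; round down to a page boundary.
  requested_address &= ~(kGranularity - 1);
  // A few inaccessible pages reserved from here make a dereference of a
  // zapped pointer fault instead of touching live memory.
  return 0;
}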
template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
{
@ -817,11 +762,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
internal::PartitionAddressSpace::Init();
#endif
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
!defined(PA_HAS_64_BITS_POINTERS)
ReserveBackupRefPtrGuardRegionIfNeeded();
#endif
flags.allow_aligned_alloc =
opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
flags.allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
@ -849,14 +789,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// BRP requires objects to be in a different Pool.
PA_CHECK(!(flags.use_configurable_pool && brp_enabled()));
#if BUILDFLAG(ENABLE_PKEYS)
// BRP and pkey mode use different pools, so they can't be enabled at the
// same time.
PA_CHECK(opts.pkey == internal::kDefaultPkey ||
opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
flags.pkey = opts.pkey;
#endif
// Ref-count messes up the alignment needed for AlignedAlloc, making this
// option incompatible, except in the PUT_REF_COUNT_IN_PREVIOUS_SLOT case.
@ -880,11 +812,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
}
#if defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add one extra byte to each slot's end to allow beyond-the-end
// pointers (crbug.com/1364476).
flags.extras_size += 1;
#endif // defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#endif // defined(PA_EXTRAS_REQUIRED)
// Re-confirm the above PA_CHECKs, by making sure there are no
@ -952,12 +879,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
#if BUILDFLAG(ENABLE_PARTITION_ALLOC_AS_MALLOC_SUPPORT)
PartitionAllocMallocInitOnce();
#endif
#if BUILDFLAG(ENABLE_PKEYS)
if (flags.pkey != internal::kDefaultPkey) {
internal::PartitionAllocPkeyInit(flags.pkey);
}
#endif
}
template <bool thread_safe>
@ -1039,7 +960,6 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
// bucket->slot_size is the currently committed size of the allocation.
size_t current_slot_size = slot_span->bucket->slot_size;
size_t current_usable_size = slot_span->GetUsableSize(this);
uintptr_t slot_start = SlotSpan::ToSlotSpanStart(slot_span);
// This is the available part of the reservation up to which the new
// allocation can grow.
@ -1093,16 +1013,6 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
IncreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
slot_span->bucket->slot_size, raw_size);
// Always record an in-place realloc() as a free()+malloc() pair.
//
// The early returns above (`return false`) will fall back to free()+malloc(),
// so this is consistent.
auto* thread_cache = GetOrCreateThreadCache();
if (ThreadCache::IsValid(thread_cache)) {
thread_cache->RecordDeallocation(current_usable_size);
thread_cache->RecordAllocation(slot_span->GetUsableSize(this));
}
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Write a new trailing cookie.
if (flags.allow_cookie) {
@ -1127,10 +1037,8 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
if (AllocationCapacityFromRequestedSize(new_size) !=
AllocationCapacityFromSlotStart(slot_start)) {
AllocationCapacityFromSlotStart(slot_start))
return false;
}
size_t current_usable_size = slot_span->GetUsableSize(this);
// Trying to allocate |new_size| would use the same amount of underlying
// memory as we're already using, so re-use the allocation after updating
@ -1162,16 +1070,6 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
}
// Always record a realloc() as a free() + malloc(), even if it's in
// place. When we cannot do it in place (`return false` above), the allocator
// falls back to free()+malloc(), so this is consistent.
ThreadCache* thread_cache = GetOrCreateThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
thread_cache->RecordDeallocation(current_usable_size);
thread_cache->RecordAllocation(slot_span->GetUsableSize(this));
}
return object;
}
@ -1471,7 +1369,9 @@ void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() {
}
template <>
ThreadCache* PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCache() {
uintptr_t PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCacheAndAlloc(
uint16_t bucket_index,
size_t* slot_size) {
auto* tcache = ThreadCache::Get();
// See comment in `EnableThreadCacheIfSupport()` for why this is an acquire
// load.
@ -1484,7 +1384,7 @@ ThreadCache* PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCache() {
// be us, in which case we are re-entering and should not create a thread
// cache. If it is not us, then this merely delays thread cache
// construction a bit, which is not an issue.
return nullptr;
return 0;
}
// There is no per-thread ThreadCache allocated here yet, and this partition
@ -1507,7 +1407,10 @@ ThreadCache* PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCache() {
tcache = ThreadCache::Create(this);
thread_caches_being_constructed_.fetch_sub(1, std::memory_order_relaxed);
return tcache;
// The cache is created empty, but at least this will trigger a batch fill,
// which may be useful, and we are already in a slow path anyway (first small
// allocation of this thread).
return tcache->GetFromCache(bucket_index, slot_size);
}
template <>

View File

@ -66,7 +66,6 @@
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/thread_cache.h"
@ -204,25 +203,14 @@ struct PartitionOptions {
Cookie cookie,
BackupRefPtr backup_ref_ptr,
BackupRefPtrZapping backup_ref_ptr_zapping,
UseConfigurablePool use_configurable_pool
#if BUILDFLAG(ENABLE_PKEYS)
,
int pkey = internal::kDefaultPkey
#endif
)
UseConfigurablePool use_configurable_pool)
: aligned_alloc(aligned_alloc),
thread_cache(thread_cache),
quarantine(quarantine),
cookie(cookie),
backup_ref_ptr(backup_ref_ptr),
backup_ref_ptr_zapping(backup_ref_ptr_zapping),
use_configurable_pool(use_configurable_pool)
#if BUILDFLAG(ENABLE_PKEYS)
,
pkey(pkey)
#endif
{
}
use_configurable_pool(use_configurable_pool) {}
AlignedAlloc aligned_alloc;
ThreadCache thread_cache;
@ -231,9 +219,6 @@ struct PartitionOptions {
BackupRefPtr backup_ref_ptr;
BackupRefPtrZapping backup_ref_ptr_zapping;
UseConfigurablePool use_configurable_pool;
#if BUILDFLAG(ENABLE_PKEYS)
int pkey;
#endif
};
// Never instantiate a PartitionRoot directly, instead use
@ -295,10 +280,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
bool use_configurable_pool;
#if BUILDFLAG(ENABLE_PKEYS)
int pkey;
#endif
#if defined(PA_EXTRAS_REQUIRED)
uint32_t extras_size;
uint32_t extras_offset;
@ -553,9 +534,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
void* ptr);
PA_ALWAYS_INLINE PageAccessibilityConfiguration GetPageAccessibility() const;
PA_ALWAYS_INLINE PageAccessibilityConfiguration
PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::Permissions) const;
PA_ALWAYS_INLINE size_t
AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
@ -657,11 +635,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PA_DCHECK(IsConfigurablePoolAvailable());
return internal::kConfigurablePoolHandle;
}
#if BUILDFLAG(ENABLE_PKEYS)
if (flags.pkey != internal::kDefaultPkey) {
return internal::kPkeyPoolHandle;
}
#endif
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return brp_enabled() ? internal::kBRPPoolHandle
: internal::kRegularPoolHandle;
@ -910,10 +883,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
void DecommitEmptySlotSpans() PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
PA_ALWAYS_INLINE void RawFreeLocked(uintptr_t slot_start)
PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
ThreadCache* MaybeInitThreadCache();
// May return an invalid thread cache.
PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
uintptr_t MaybeInitThreadCacheAndAlloc(uint16_t bucket_index,
size_t* slot_size);
#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
static internal::Lock& GetEnumeratorLock();
@ -1123,8 +1094,8 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
slot_start)) {
*is_already_zeroed = false;
// This is a fast path, avoid calling GetUsableSize() in Release builds
// as it is costlier. Copy its small bucket path instead.
// This is a fast path, so avoid calling GetUsableSize() on Release builds
// as it is more costly. Copy its small bucket path instead.
*usable_size = AdjustSizeForExtrasSubtract(bucket->slot_size);
PA_DCHECK(*usable_size == slot_span->GetUsableSize(this));
@ -1513,35 +1484,21 @@ template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
uintptr_t slot_start,
SlotSpan* slot_span) {
// TLS access can be expensive, so do a cheap local check first.
//
// PA_LIKELY: performance-sensitive partitions have a thread cache, and
// direct-mapped allocations are uncommon.
ThreadCache* thread_cache = GetOrCreateThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
if (PA_LIKELY(flags.with_thread_cache &&
!IsDirectMappedBucket(slot_span->bucket))) {
size_t bucket_index =
static_cast<size_t>(slot_span->bucket - this->buckets);
size_t slot_size;
if (PA_LIKELY(thread_cache->MaybePutInCache(slot_start, bucket_index,
&slot_size))) {
// This is a fast path, avoid calling GetUsableSize() in Release builds
// as it is costlier. Copy its small bucket path instead.
PA_DCHECK(!slot_span->CanStoreRawSize());
size_t usable_size = AdjustSizeForExtrasSubtract(slot_size);
PA_DCHECK(usable_size == slot_span->GetUsableSize(this));
thread_cache->RecordDeallocation(usable_size);
auto* thread_cache = ThreadCache::Get();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
thread_cache->MaybePutInCache(slot_start, bucket_index))) {
return;
}
}
if (PA_LIKELY(ThreadCache::IsValid(thread_cache))) {
// Accounting must be done outside `RawFree()`, as it's also called from the
// thread cache. We would double-count otherwise.
//
// GetUsableSize() will always give the correct result, and we are in a slow
// path here (since the thread cache case returned earlier).
size_t usable_size = slot_span->GetUsableSize(this);
thread_cache->RecordDeallocation(usable_size);
}
RawFree(slot_start, slot_span);
}
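A minimal sketch of the free fast path shaped like the code above: branch hints, cheapest check first, slow path last (the helpers are illustrative stand-ins, not PartitionAlloc's API):

#include <cstdlib>

#if defined(__GNUC__) || defined(__clang__)
#define SKETCH_LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define SKETCH_LIKELY(x) (x)
#endif

namespace {
bool g_with_thread_cache = true;           // cheap, non-TLS flag
bool TryPutInThreadCache(void* /*ptr*/) {  // may refuse, e.g. cache full
  return true;
}
void RawFree(void* ptr) { std::free(ptr); }  // stand-in slow path
}  // namespace

void Free(void* ptr) {
  // Cheapest test first, before any TLS access or bucket lookup.
  if (SKETCH_LIKELY(g_with_thread_cache) &&
      SKETCH_LIKELY(TryPutInThreadCache(ptr))) {
    return;  // fast path: the slot is kept for reuse on this thread
  }
  RawFree(ptr);  // slow path: return the slot to the partition
}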
@ -1697,8 +1654,8 @@ PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
internal::ScopedSyscallTimer timer{this};
bool ok = TryRecommitSystemPages(address, length, GetPageAccessibility(),
accessibility_disposition);
#if defined(PA_COMMIT_CHARGE_IS_LIMITED)
if (PA_UNLIKELY(!ok)) {
// Decommit some memory and retry. The alternative is crashing.
{
::partition_alloc::internal::ScopedGuard guard(lock_);
DecommitEmptySlotSpans();
@ -1706,6 +1663,7 @@ PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
ok = TryRecommitSystemPages(address, length, GetPageAccessibility(),
accessibility_disposition);
}
#endif // defined(PA_COMMIT_CHARGE_IS_LIMITED)
if (ok)
IncreaseCommittedPages(length);
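The decommit-and-retry logic above generalizes to a small retry-after-reclaim sketch, with hypothetical TryCommit/ReclaimEmptySpans stubs standing in for the real calls:

#include <cstddef>

namespace {
bool TryCommit(void* /*addr*/, size_t /*len*/) { return false; }  // stub
void ReclaimEmptySpans() {}                                       // stub
}  // namespace

// When the OS refuses a commit (e.g. the commit charge is exhausted),
// reclaim what we can and retry exactly once; a second failure is surfaced
// to the caller instead of crashing here.
bool CommitWithRetry(void* addr, size_t len) {
  if (TryCommit(addr, len))
    return true;
  ReclaimEmptySpans();
  return TryCommit(addr, len);
}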
@ -1766,27 +1724,11 @@ PartitionRoot<thread_safe>::GetUsableSizeWithMac11MallocSizeHack(void* ptr) {
template <bool thread_safe>
PA_ALWAYS_INLINE PageAccessibilityConfiguration
PartitionRoot<thread_safe>::GetPageAccessibility() const {
PageAccessibilityConfiguration::Permissions permissions =
PageAccessibilityConfiguration::kReadWrite;
#if defined(PA_HAS_MEMORY_TAGGING)
if (IsMemoryTaggingEnabled())
permissions = PageAccessibilityConfiguration::kReadWriteTagged;
return PageAccessibilityConfiguration::kReadWriteTagged;
#endif
#if BUILDFLAG(ENABLE_PKEYS)
return PageAccessibilityConfiguration(permissions, flags.pkey);
#else
return PageAccessibilityConfiguration(permissions);
#endif
}
template <bool thread_safe>
PA_ALWAYS_INLINE PageAccessibilityConfiguration
PartitionRoot<thread_safe>::PageAccessibilityWithPkeyIfEnabled(
PageAccessibilityConfiguration::Permissions permissions) const {
#if BUILDFLAG(ENABLE_PKEYS)
return PageAccessibilityConfiguration(permissions, flags.pkey);
#endif
return PageAccessibilityConfiguration(permissions);
return PageAccessibilityConfiguration::kReadWrite;
}
// Return the capacity of the underlying slot (adjusted for extras). This
@ -1914,18 +1856,20 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
}
#endif // BUILDFLAG(STARSCAN)
auto* thread_cache = GetOrCreateThreadCache();
// Don't use thread cache if higher order alignment is requested, because the
// thread cache will not be able to satisfy it.
//
// PA_LIKELY: performance-sensitive partitions use the thread cache.
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
if (PA_LIKELY(this->flags.with_thread_cache &&
slot_span_alignment <= internal::PartitionPageSize())) {
// Note: slot_size is taken from the thread cache rather than from
// `buckets[bucket_index].slot_size`, to avoid touching `buckets` on the fast
// path.
slot_start = thread_cache->GetFromCache(bucket_index, &slot_size);
auto* tcache = ThreadCache::Get();
// PA_LIKELY: Typically always true, except for the very first allocation of
// this thread.
if (PA_LIKELY(ThreadCache::IsValid(tcache))) {
slot_start = tcache->GetFromCache(bucket_index, &slot_size);
} else {
slot_start = MaybeInitThreadCacheAndAlloc(bucket_index, &slot_size);
}
// PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
if (PA_LIKELY(slot_start)) {
@ -1961,9 +1905,6 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
if (PA_UNLIKELY(!slot_start))
return nullptr;
if (PA_LIKELY(ThreadCache::IsValid(thread_cache)))
thread_cache->RecordAllocation(usable_size);
// Layout inside the slot:
// |[refcnt]|...object...|[empty]|[cookie]|[unused]|
// <----(a)----->
@ -2223,18 +2164,6 @@ PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
#endif
}
template <bool thread_safe>
ThreadCache* PartitionRoot<thread_safe>::GetOrCreateThreadCache() {
ThreadCache* thread_cache = nullptr;
if (PA_LIKELY(flags.with_thread_cache)) {
thread_cache = ThreadCache::Get();
if (PA_UNLIKELY(!ThreadCache::IsValid(thread_cache))) {
thread_cache = MaybeInitThreadCache();
}
}
return thread_cache;
}
using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==

View File

@ -15,7 +15,7 @@
namespace partition_alloc {
// Most of these are not populated if PA_THREAD_CACHE_ENABLE_STATISTICS is not
// Most of these are not populated if PA_ENABLE_THREAD_CACHE_STATISTICS is not
// defined.
struct ThreadCacheStats {
uint64_t alloc_count; // Total allocation requests.
@ -42,20 +42,6 @@ struct ThreadCacheStats {
#endif // defined(PA_THREAD_CACHE_ALLOC_STATS)
};
// Per-thread allocation statistics. Only covers allocations made through the
// partition linked to the thread cache. As the allocator doesn't record
// requested sizes in most cases, the data there will be an overestimate of the
// actually requested sizes. It is also not expected to sum up to anything
// meaningful across threads, due to the lack of synchronization. Figures there
// are cumulative, not net. Since the data below is per-thread, note that a
// thread can deallocate more than it allocated.
struct ThreadAllocStats {
uint64_t alloc_count;
uint64_t alloc_total_size;
uint64_t dealloc_count;
uint64_t dealloc_total_size;
};
// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
@ -72,8 +58,8 @@ struct PartitionMemoryStats {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t
total_brp_quarantined_bytes; // Total bytes that are quarantined by BRP.
size_t total_brp_quarantined_count; // Total number of slots that are
// quarantined by BRP.
size_t total_brp_quarantined_count; // Total number of slots that are
// quarantined by BRP.
size_t cumulative_brp_quarantined_bytes; // Cumulative bytes that are
// quarantined by BRP.
size_t cumulative_brp_quarantined_count; // Cumulative number of slots that
@ -100,16 +86,16 @@ struct PartitionBucketMemoryStats {
uint32_t allocated_slot_span_size; // Total size the slot span allocated
// from the system (committed pages).
uint32_t active_bytes; // Total active bytes used in the bucket.
uint32_t active_count; // Total active objects allocated in the bucket.
uint32_t resident_bytes; // Total bytes provisioned in the bucket.
uint32_t decommittable_bytes; // Total bytes that could be decommitted.
uint32_t discardable_bytes; // Total bytes that could be discarded.
uint32_t num_full_slot_spans; // Number of slot spans with all slots
// allocated.
uint32_t num_active_slot_spans; // Number of slot spans that have at least
// one provisioned slot.
uint32_t num_empty_slot_spans; // Number of slot spans that are empty
// but not decommitted.
uint32_t active_count; // Total active objects allocated in the bucket.
uint32_t resident_bytes; // Total bytes provisioned in the bucket.
uint32_t decommittable_bytes; // Total bytes that could be decommitted.
uint32_t discardable_bytes; // Total bytes that could be discarded.
uint32_t num_full_slot_spans; // Number of slot spans with all slots
// allocated.
uint32_t num_active_slot_spans; // Number of slot spans that have at least
// one provisioned slot.
uint32_t num_empty_slot_spans; // Number of slot spans that are empty
// but not decommitted.
uint32_t num_decommitted_slot_spans; // Number of slot spans that are empty
// and decommitted.
};

View File

@ -1,121 +0,0 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#if BUILDFLAG(ENABLE_PKEYS)
#include <errno.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#if !BUILDFLAG(IS_LINUX)
#error "This pkey code is currently only supported on Linux"
#endif
namespace partition_alloc::internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool CPUHasPkeySupport() {
return base::CPU::GetInstanceNoAllocation().has_pku();
}
PkeySettings PkeySettings::settings PA_PKEY_ALIGN;
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
int PkeyMprotect(void* addr, size_t len, int prot, int pkey) {
return syscall(SYS_pkey_mprotect, addr, len, prot, pkey);
}
int PkeyMprotectIfEnabled(void* addr, size_t len, int prot, int pkey) {
if (PA_UNLIKELY(PkeySettings::settings.enabled)) {
return PkeyMprotect(addr, len, prot, pkey);
}
PA_CHECK(pkey == 0);
return mprotect(addr, len, prot);
}
void TagMemoryWithPkey(int pkey, void* address, size_t size) {
PA_DCHECK(
(reinterpret_cast<uintptr_t>(address) & PA_PKEY_ALIGN_OFFSET_MASK) == 0);
PA_PCHECK(
PkeyMprotect(address,
(size + PA_PKEY_ALIGN_OFFSET_MASK) & PA_PKEY_ALIGN_BASE_MASK,
PROT_READ | PROT_WRITE, pkey) == 0);
}
template <typename T>
void TagVariableWithPkey(int pkey, T& var) {
TagMemoryWithPkey(pkey, &var, sizeof(T));
}
void TagGlobalsWithPkey(int pkey) {
TagVariableWithPkey(pkey, PartitionAddressSpace::setup_);
AddressPoolManager::Pool* pool =
AddressPoolManager::GetInstance().GetPool(kPkeyPoolHandle);
TagVariableWithPkey(pkey, *pool);
uint16_t* pkey_reservation_offset_table =
GetReservationOffsetTable(kPkeyPoolHandle);
TagMemoryWithPkey(pkey, pkey_reservation_offset_table,
ReservationOffsetTable::kReservationOffsetTableLength);
TagVariableWithPkey(pkey, PkeySettings::settings);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
int PkeyAlloc(int access_rights) {
return syscall(SYS_pkey_alloc, 0, access_rights);
}
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void PkeyFree(int pkey) {
PA_PCHECK(syscall(SYS_pkey_free, pkey) == 0);
}
uint32_t Rdpkru() {
uint32_t pkru;
asm volatile(".byte 0x0f,0x01,0xee\n" : "=a"(pkru) : "c"(0), "d"(0));
return pkru;
}
void Wrpkru(uint32_t pkru) {
asm volatile(".byte 0x0f,0x01,0xef\n" : : "a"(pkru), "c"(0), "d"(0));
}
#if BUILDFLAG(PA_DCHECK_IS_ON)
LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
: saved_pkey_value_(kDefaultPkeyValue) {
if (!PkeySettings::settings.enabled)
return;
saved_pkey_value_ = Rdpkru();
if (saved_pkey_value_ != kDefaultPkeyValue) {
Wrpkru(kAllowAllPkeyValue);
}
}
LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
if (!PkeySettings::settings.enabled)
return;
if (Rdpkru() != saved_pkey_value_) {
Wrpkru(saved_pkey_value_);
}
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
} // namespace partition_alloc::internal
#endif // BUILDFLAG(ENABLE_PKEYS)

View File

@ -1,98 +0,0 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PKEY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PKEY_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#if BUILDFLAG(ENABLE_PKEYS)
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include <cstddef>
#include <cstdint>
#if !defined(PA_HAS_64_BITS_POINTERS)
#error "pkey support requires 64 bit pointers"
#endif
#define PA_PKEY_ALIGN_SZ SystemPageSize()
#define PA_PKEY_ALIGN_OFFSET_MASK (PA_PKEY_ALIGN_SZ - 1)
#define PA_PKEY_ALIGN_BASE_MASK (~PA_PKEY_ALIGN_OFFSET_MASK)
#define PA_PKEY_ALIGN alignas(PA_PKEY_ALIGN_SZ)
#define PA_PKEY_FILL_PAGE_SZ(size) \
((PA_PKEY_ALIGN_SZ - (size & PA_PKEY_ALIGN_OFFSET_MASK)) % PA_PKEY_ALIGN_SZ)
// Calculate the required padding so that the last element of a page-aligned
// array lands on a page boundary. In other words, calculate that padding so
// that (count-1) elements are a multiple of page size.
#define PA_PKEY_ARRAY_PAD_SZ(Type, count) \
PA_PKEY_FILL_PAGE_SZ(sizeof(Type) * (count - 1))
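A standalone check of that padding arithmetic, with assumed numbers (4 KiB pages, a 40-byte element, four elements): the pad makes the last element start exactly on a page boundary, so it can be pkey-protected on its own page.

#include <cstddef>

constexpr size_t kPageSize = 4096;  // assumed PA_PKEY_ALIGN_SZ
constexpr size_t FillPageSz(size_t size) {
  return (kPageSize - (size & (kPageSize - 1))) % kPageSize;
}

struct Element { char bytes[40]; };  // illustrative element type
constexpr size_t kCount = 4;
constexpr size_t kPad = FillPageSz(sizeof(Element) * (kCount - 1));

// Pad + (count-1) elements fill whole pages, so element kCount-1 begins on a
// page boundary whenever the padded array itself starts page-aligned.
static_assert((kPad + sizeof(Element) * (kCount - 1)) % kPageSize == 0,
              "last element must start on a page boundary");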
namespace partition_alloc::internal {
constexpr int kDefaultPkey = 0;
constexpr int kInvalidPkey = -1;
// Check if the CPU supports pkeys.
bool CPUHasPkeySupport();
// A wrapper around pkey_mprotect that falls back to regular mprotect if
// PkeySettings::enabled is false.
[[nodiscard]] int PkeyMprotectIfEnabled(void* addr,
size_t len,
int prot,
int pkey);
// A wrapper around pkey_mprotect without fallback.
[[nodiscard]] int PkeyMprotect(void* addr, size_t len, int prot, int pkey);
// If we set up a pkey pool, we need to tag global variables with the pkey to
// make them readable in case default pkey access is disabled. Called once
// during pkey pool initialization.
void TagGlobalsWithPkey(int pkey);
int PkeyAlloc(int access_rights);
void PkeyFree(int pkey);
// Read the pkru register (the current pkey state).
uint32_t Rdpkru();
// Write the pkru register (the current pkey state).
void Wrpkru(uint32_t pkru);
struct PkeySettings {
bool enabled = false;
char pad_[PA_PKEY_FILL_PAGE_SZ(sizeof(enabled))] = {};
static PkeySettings settings PA_PKEY_ALIGN PA_CONSTINIT;
};
#if BUILDFLAG(PA_DCHECK_IS_ON)
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LiftPkeyRestrictionsScope {
public:
static constexpr uint32_t kDefaultPkeyValue = 0x55555554;
static constexpr uint32_t kAllowAllPkeyValue = 0x0;
LiftPkeyRestrictionsScope();
~LiftPkeyRestrictionsScope();
private:
uint32_t saved_pkey_value_;
};
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
} // namespace partition_alloc::internal
#else // BUILDFLAG(ENABLE_PKEYS)
#define PA_PKEY_ALIGN
#define PA_PKEY_FILL_PAGE_SZ(size) 0
#define PA_PKEY_ARRAY_PAD_SZ(Type, size) 0
#endif // BUILDFLAG(ENABLE_PKEYS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PKEY_H_
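For reference, kDefaultPkeyValue above decodes cleanly under the x86 PKRU layout (two bits per key: access-disable, then write-disable). A small sketch with illustrative helpers:

#include <cstdint>

// PKRU holds two bits per protection key: bit 2k is AD (access disable),
// bit 2k+1 is WD (write disable). 0x55555554 therefore sets AD on keys
// 1..15 while leaving key 0, the default key, fully accessible.
constexpr uint32_t AdBit(int pkey) { return 1u << (2 * pkey); }
constexpr uint32_t WdBit(int pkey) { return 1u << (2 * pkey + 1); }

constexpr uint32_t kDefaultPkeyValue = 0x55555554;
static_assert((kDefaultPkeyValue & AdBit(0)) == 0, "key 0 stays readable");
static_assert((kDefaultPkeyValue & WdBit(0)) == 0, "key 0 stays writable");
static_assert((kDefaultPkeyValue & AdBit(1)) != 0, "key 1 access-disabled");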

View File

@ -7,8 +7,8 @@
namespace partition_alloc::internal {
#if defined(PA_HAS_64_BITS_POINTERS)
ReservationOffsetTable::_PaddedReservationOffsetTables
ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN;
ReservationOffsetTable::_ReservationOffsetTable
ReservationOffsetTable::reservation_offset_tables_[];
#else
ReservationOffsetTable::_ReservationOffsetTable
ReservationOffsetTable::reservation_offset_table_;

View File

@ -19,7 +19,6 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -82,7 +81,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets.");
struct _ReservationOffsetTable {
static PA_CONSTINIT struct _ReservationOffsetTable {
// The number of table elements is less than MAX_UINT16, so the element type
// can be uint16_t.
static_assert(
@ -94,30 +93,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
for (uint16_t& offset : offsets)
offset = kOffsetTagNotAllocated;
}
};
#if defined(PA_HAS_64_BITS_POINTERS)
// If pkey support is enabled, we need to pkey-tag the tables of the pkey
// pool. For this, we need to pad the tables so that the pkey ones start on a
// page boundary.
struct _PaddedReservationOffsetTables {
char pad_[PA_PKEY_ARRAY_PAD_SZ(_ReservationOffsetTable, kNumPools)] = {};
struct _ReservationOffsetTable tables[kNumPools];
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(_ReservationOffsetTable))] = {};
};
static PA_CONSTINIT _PaddedReservationOffsetTables
padded_reservation_offset_tables_ PA_PKEY_ALIGN;
// One table per Pool.
} reservation_offset_tables_[kNumPools];
#else
// A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
} reservation_offset_table_;
#endif
};
#if defined(PA_HAS_64_BITS_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
return ReservationOffsetTable::padded_reservation_offset_tables_
.tables[handle - 1]
.offsets;
return ReservationOffsetTable::reservation_offset_tables_[handle - 1].offsets;
}
PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
@ -181,12 +169,6 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
bool is_in_brp_pool = IsManagedByPartitionAllocBRPPool(address);
bool is_in_regular_pool = IsManagedByPartitionAllocRegularPool(address);
bool is_in_configurable_pool =
IsManagedByPartitionAllocConfigurablePool(address);
#if BUILDFLAG(ENABLE_PKEYS)
bool is_in_pkey_pool = IsManagedByPartitionAllocPkeyPool(address);
#endif
// When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!is_in_brp_pool);
@ -215,12 +197,6 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
IsManagedByPartitionAllocBRPPool(reservation_start + kBRPOffset));
PA_DCHECK(is_in_regular_pool ==
IsManagedByPartitionAllocRegularPool(reservation_start));
PA_DCHECK(is_in_configurable_pool ==
IsManagedByPartitionAllocConfigurablePool(reservation_start));
#if BUILDFLAG(ENABLE_PKEYS)
PA_DCHECK(is_in_pkey_pool ==
IsManagedByPartitionAllocPkeyPool(reservation_start));
#endif
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
#endif // BUILDFLAG(PA_DCHECK_IS_ON)

View File

@ -164,8 +164,7 @@ BASE_EXPORT void ConfigurePartitionAlloc();
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void InitializeDefaultAllocatorPartitionRoot();
bool IsDefaultAllocatorPartitionRootInitialized();
void InitializeDefaultAllocatorPartitionRoot();
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// On macOS, the allocator shim needs to be turned on during runtime.
BASE_EXPORT void InitializeAllocatorShim();

View File

@ -15,7 +15,6 @@
#include <string.h>
#include <atomic>
#include <tuple>
#include "base/allocator/early_zone_registration_mac.h"
@ -313,10 +312,6 @@ void InitializeZone() {
g_mac_malloc_zone.claimed_address = nullptr;
}
namespace {
static std::atomic<bool> g_initialization_is_done;
}
// Replaces the default malloc zone with our own malloc zone backed by
// PartitionAlloc. Since we'd like to make as much code as possible to use our
// own memory allocator (and reduce bugs caused by mixed use of the system
@ -354,7 +349,6 @@ InitializeDefaultMallocZoneWithPartitionAlloc() {
// |EarlyMallocZoneRegistration()|.
malloc_zone_register(&g_mac_malloc_zone);
malloc_zone_unregister(system_default_zone);
g_initialization_is_done.store(true, std::memory_order_release);
return;
}
@ -379,17 +373,10 @@ InitializeDefaultMallocZoneWithPartitionAlloc() {
// Confirm that our own zone is now the default zone.
CHECK_EQ(GetDefaultMallocZone(), &g_mac_malloc_zone);
g_initialization_is_done.store(true, std::memory_order_release);
}
} // namespace
bool IsDefaultAllocatorPartitionRootInitialized() {
// Even though zone registration is not thread-safe, let's not make it worse,
// and use acquire/release ordering.
return g_initialization_is_done.load(std::memory_order_acquire);
}
} // namespace allocator_shim
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SHIM_ALLOCATOR_SHIM_OVERRIDE_MAC_DEFAULT_ZONE_H_
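The acquire/release pairing used by g_initialization_is_done above is the standard publication pattern, sketched here with generic names:

#include <atomic>

std::atomic<bool> g_done{false};

void Publish() {
  // ... initialization work happens before the store ...
  g_done.store(true, std::memory_order_release);  // publishes prior writes
}

bool IsDone() {
  // Pairs with the release store: once true is observed, every write that
  // preceded the store is visible to this thread as well.
  return g_done.load(std::memory_order_acquire);
}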

View File

@ -272,8 +272,7 @@ void CommitCardTable() {
#if PA_STARSCAN_USE_CARD_TABLE
RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
sizeof(QuarantineCardTable),
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
#endif
}
@ -1405,8 +1404,7 @@ PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
*metadata;
RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
state_bitmap_size_to_commit,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kReadWrite),
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
super_pages.push_back(super_page);
}

View File

@ -775,10 +775,6 @@ void ThreadCache::PurgeInternal() {
PurgeInternalHelper<true>();
}
void ThreadCache::ResetPerThreadAllocationStatsForTesting() {
thread_alloc_stats_ = {};
}
template <bool crash_on_corruption>
void ThreadCache::PurgeInternalHelper() {
should_purge_.store(false, std::memory_order_relaxed);

View File

@ -286,8 +286,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
// can happen either because the cache is full or the allocation was too
// large.
PA_ALWAYS_INLINE bool MaybePutInCache(uintptr_t slot_start,
size_t bucket_index,
size_t* slot_size);
size_t bucket_index);
// Tries to allocate a memory slot from the cache.
// Returns 0 on failure.
@ -315,9 +314,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
// Purge the thread cache of the current thread, if one exists.
static void PurgeCurrentThread();
const ThreadAllocStats& thread_alloc_stats() const {
return thread_alloc_stats_;
}
size_t bucket_count_for_testing(size_t index) const {
return buckets_[index].count;
}
@ -329,17 +325,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
// |kLargeSizeThreshold|.
static void SetLargestCachedSize(size_t size);
// Cumulative stats about *all* allocations made on the `root_` partition on
// this thread; that is, not only the allocations serviced by the thread cache,
// but all allocations, including large and direct-mapped ones. This should in
// theory be split into a separate PerThread data structure, but the thread
// cache is the only per-thread data we have as of now.
//
// TODO(lizeb): Investigate adding a proper per-thread data structure.
PA_ALWAYS_INLINE void RecordAllocation(size_t size);
PA_ALWAYS_INLINE void RecordDeallocation(size_t size);
void ResetPerThreadAllocationStatsForTesting();
// Fill 1 / kBatchFillRatio * bucket.limit slots at a time.
static constexpr uint16_t kBatchFillRatio = 8;
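As a worked instance of that ratio, assuming a bucket limit of 128 slots: one miss refills 16 slots, amortizing the central allocator's lock over the following hits.

#include <cstdint>

constexpr uint16_t kBatchFillRatio = 8;

// One cache miss provisions limit / ratio slots in a single pass.
constexpr uint16_t BatchFillCount(uint16_t bucket_limit) {
  return bucket_limit / kBatchFillRatio;
}
static_assert(BatchFillCount(128) == 16, "illustrative sizing");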
@ -435,7 +420,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
uint32_t cached_memory_ = 0;
std::atomic<bool> should_purge_;
ThreadCacheStats stats_;
ThreadAllocStats thread_alloc_stats_;
// Buckets are quite big, though each is only 2 pointers.
Bucket buckets_[kBucketCount];
@ -482,8 +466,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
};
PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
size_t bucket_index,
size_t* slot_size) {
size_t bucket_index) {
PA_REENTRANCY_GUARD(is_in_thread_cache_);
PA_INCREMENT_COUNTER(stats_.cache_fill_count);
@ -513,7 +496,6 @@ PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
PurgeInternal();
*slot_size = bucket.slot_size;
return true;
}
@ -550,18 +532,6 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_DCHECK(bucket.count != 0);
internal::PartitionFreelistEntry* entry = bucket.freelist_head;
// TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
defined(PA_HAS_64_BITS_POINTERS)
// The x86_64 architecture now supports 57 bits of address space, as of Intel
// Ice Lake. However, Chrome OS systems do not ship with kernel support for
// it, but with 48 bits, so all canonical addresses have the upper 16 bits
// zeroed (17 in practice, since the upper half of the address space is
// reserved by the kernel).
constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
#endif
// Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that led to the crash, helping to
// narrow down the search for the culprit. |bucket| was touched just now, so this
@ -576,7 +546,6 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_DCHECK(cached_memory_ >= bucket.slot_size);
cached_memory_ -= bucket.slot_size;
return internal::SlotStartPtr2Addr(entry);
}
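The canonical-address check above boils down to masking off everything beyond bit 47. A standalone sketch, assuming a 64-bit target and the 48-bit Chrome OS kernel configuration described in the comment:

#include <cstdint>

constexpr uintptr_t kCanonicalPointerMask = (uintptr_t{1} << 48) - 1;

// Under a 48-bit kernel, user-space canonical addresses have bits 48..63
// zero; anything else indicates corruption (or a kernel-half address).
constexpr bool IsCanonicalUserPointer(uintptr_t addr) {
  return (addr & ~kCanonicalPointerMask) == 0;
}

static_assert(IsCanonicalUserPointer(0x00007fffdeadbeef), "typical user VA");
static_assert(!IsCanonicalUserPointer(0xffff800000000000), "kernel-half VA");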
@ -643,16 +612,6 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
bucket.count++;
}
void ThreadCache::RecordAllocation(size_t size) {
thread_alloc_stats_.alloc_count++;
thread_alloc_stats_.alloc_total_size += size;
}
void ThreadCache::RecordDeallocation(size_t size) {
thread_alloc_stats_.dealloc_count++;
thread_alloc_stats_.dealloc_total_size += size;
}
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_

View File

@ -1,69 +0,0 @@
# //base/android
This directory contains:
* C++ APIs that are broadly useful and are unique to `target_os="android"`, and
* Java APIs that are broadly useful, along with C++ bindings when necessary.
This directory does not contain Android-specific implementations / extensions
to APIs declared directly in `//base`. Those live in `//base/*_android.cc`, or
behind `#ifdef`s.
## Adding New APIs
The advice laid out in [//base/README.md] applies to this directory as well.
The bars for what APIs should exist and for code quality are generally higher
than for other directories. If you find yourself wanting to add a new API, you
should expect that code reviews take multiple revisions and that they be met
with (respectful) scrutiny.
If you are not sure whether an API would make sense to add, you can ask via
java@chromium.org.
It is common to add APIs to `//chrome` (or elsewhere) first, and move them into
`//base` after their usefulness has been proven.
[//base/README.md]: /base/README.md
### What Uses //base/android?
The two main clients are Chrome and WebView, but it is also used by other
Chromium-based apps, such as Chromecast and Chrome Remote Desktop. Some
`//base/android` classes are used by `//build` (this is a layering violation,
tracked in [crbug/1364192] and [crbug/1377351]).
Two considerations for WebView:
1. The application Context is that of the host app.
2. The UI thread might be different from the main thread.
[crbug/1364192]: https://crbug.com/1364192
[crbug/1377351]: https://crbug.com/1377351
### New API Checklist
Here is a list of checks you should go through when adding a new API:
1. The functionality does not already exist in system libraries (Java APIs,
Android SDK) or in already adopted `third_party` libraries, such as AndroidX.
2. Reasonable effort has been made to ensure the new API is discoverable. E.g.:
Coordinate refactorings of existing patterns to it, add a [presubmit check]
to recommend it, etc.
3. Tests (ideally Robolectric) are added.
4. Thought has been put into API design.
* E.g. adding `@Nullable`, or `@DoNotMock`
* E.g. adding test helpers, such as `ForTesting()` methods or `TestRule`s
* E.g. adding asserts or comments about thread-safety
* E.g. could usage of the API be made harder to get wrong?
[presubmit check]: https://chromium.googlesource.com/chromium/src/+/main/build/android/docs/java_toolchain.md#Static-Analysis-Code-Checks
### Choosing a Reviewer
All members of [`//base/android/OWNERS`] will be CC'ed on reviews through a
[`//WATCHLIST`] entry. For new APIs, feel free to pick a reviewer at random.
For modifying existing files, it is best to use a reviewer from prior changes to
the file.
[`//base/android/OWNERS`]: /base/android/OWNERS
[`//WATCHLIST`]: /WATCHLIST

View File

@ -14,11 +14,11 @@ namespace base {
AndroidHardwareBufferCompat::AndroidHardwareBufferCompat() {
DCHECK(IsSupportAvailable());
// TODO(https://crbug.com/1382595): If the Chromium build requires
// __ANDROID_API__ >= 26 at some point in the future, we could directly use
// the global functions instead of dynamic loading. However, since this would
// be incompatible with pre-Oreo devices, this is unlikely to happen in the
// foreseeable future, so just unconditionally use dynamic loading.
// TODO(klausw): If the Chromium build requires __ANDROID_API__ >= 26 at some
// point in the future, we could directly use the global functions instead of
// dynamic loading. However, since this would be incompatible with pre-Oreo
// devices, this is unlikely to happen in the foreseeable future, so just
// unconditionally use dynamic loading.
// cf. base/android/linker/modern_linker_jni.cc
void* main_dl_handle = dlopen(nullptr, RTLD_NOW);
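A sketch of that dynamic-loading pattern, resolving one API-26 NDK symbol at runtime so the binary still loads on pre-Oreo devices (types forward-declared; error handling elided):

#include <dlfcn.h>

struct AHardwareBuffer;       // opaque NDK types, forward-declared here
struct AHardwareBuffer_Desc;

using PFN_AHardwareBuffer_allocate =
    int (*)(const AHardwareBuffer_Desc*, AHardwareBuffer**);

PFN_AHardwareBuffer_allocate LoadAllocate() {
  void* self = dlopen(nullptr, RTLD_NOW);  // search already-loaded libraries
  // dlsym() returns null on pre-API-26 devices, where the symbol is absent.
  return reinterpret_cast<PFN_AHardwareBuffer_allocate>(
      dlsym(self, "AHardwareBuffer_allocate"));
}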

View File

@ -11,6 +11,7 @@
#include "base/android/jni_android.h"
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
namespace base {
namespace android {

View File

@ -13,16 +13,13 @@
namespace base {
namespace android {
class ScopedAllowBlockingForImportantFileWriter
: public base::ScopedAllowBlocking {};
static jboolean JNI_ImportantFileWriterAndroid_WriteFileAtomically(
JNIEnv* env,
const JavaParamRef<jstring>& file_name,
const JavaParamRef<jbyteArray>& data) {
// This is called on the UI thread during shutdown to save tab data, so
// needs to enable IO.
ScopedAllowBlockingForImportantFileWriter allow_blocking;
base::ThreadRestrictions::ScopedAllowIO allow_io;
std::string native_file_name;
base::android::ConvertJavaStringToUTF8(env, file_name, &native_file_name);
base::FilePath path(native_file_name);

View File

@ -19,13 +19,12 @@
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_local.h"
#include "build/build_config.h"
namespace base {
namespace android {
namespace {
JavaVM* g_jvm = nullptr;
JavaVM* g_jvm = NULL;
base::LazyInstance<ScopedJavaGlobalRef<jobject>>::Leaky g_class_loader =
LAZY_INSTANCE_INITIALIZER;
jmethodID g_class_loader_load_class_method_id = 0;
@ -112,11 +111,7 @@ JNIEnv* AttachCurrentThread() {
args.name = thread_name;
}
#if BUILDFLAG(IS_ANDROID)
ret = g_jvm->AttachCurrentThread(&env, &args);
#else
ret = g_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), &args);
#endif
CHECK_EQ(JNI_OK, ret);
}
return env;
@ -126,14 +121,10 @@ JNIEnv* AttachCurrentThreadWithName(const std::string& thread_name) {
DCHECK(g_jvm);
JavaVMAttachArgs args;
args.version = JNI_VERSION_1_2;
args.name = const_cast<char*>(thread_name.c_str());
args.group = nullptr;
JNIEnv* env = nullptr;
#if BUILDFLAG(IS_ANDROID)
args.name = thread_name.c_str();
args.group = NULL;
JNIEnv* env = NULL;
jint ret = g_jvm->AttachCurrentThread(&env, &args);
#else
jint ret = g_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), &args);
#endif
CHECK_EQ(JNI_OK, ret);
return env;
}
@ -151,7 +142,7 @@ void InitVM(JavaVM* vm) {
}
bool IsVMInitialized() {
return g_jvm != nullptr;
return g_jvm != NULL;
}
void InitReplacementClassLoader(JNIEnv* env,

View File

@ -9,7 +9,7 @@
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
@ -24,7 +24,6 @@
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
@ -68,7 +67,7 @@ std::pair<size_t, size_t> GetTextRange() {
// Set the end to the page on which the beginning of the last symbol is. The
// actual symbol may spill into the next page by a few bytes, but this is
// outside of the executable code range anyway.
size_t end_page = bits::AlignUp(kEndOfText, kPageSize);
size_t end_page = base::bits::AlignUp(kEndOfText, kPageSize);
return {start_page, end_page};
}
@ -78,7 +77,7 @@ std::pair<size_t, size_t> GetOrderedTextRange() {
size_t start_page = kStartOfOrderedText - kStartOfOrderedText % kPageSize;
// kEndOfUnorderedText is not considered ordered, but the byte immediately
// before is considered ordered and so cannot be contained in the start page.
size_t end_page = bits::AlignUp(kEndOfOrderedText, kPageSize);
size_t end_page = base::bits::AlignUp(kEndOfOrderedText, kPageSize);
return {start_page, end_page};
}
@ -109,8 +108,8 @@ struct TimestampAndResidency {
bool CollectResidency(size_t start,
size_t end,
std::vector<TimestampAndResidency>* data) {
// Not using TimeTicks() to not call too many base:: symbols that would pollute
// the reached symbols dumps.
// Not using base::TimeTicks() to not call too many base:: symbols that would
// pollute the reached symbols dumps.
struct timespec ts;
if (HANDLE_EINTR(clock_gettime(CLOCK_MONOTONIC, &ts))) {
PLOG(ERROR) << "Cannot get the time.";
@ -130,9 +129,10 @@ void DumpResidency(size_t start,
size_t end,
std::unique_ptr<std::vector<TimestampAndResidency>> data) {
LOG(WARNING) << "Dumping native library residency";
auto path = FilePath(
StringPrintf("/data/local/tmp/chrome/residency-%d.txt", getpid()));
auto file = File(path, File::FLAG_CREATE_ALWAYS | File::FLAG_WRITE);
auto path = base::FilePath(
base::StringPrintf("/data/local/tmp/chrome/residency-%d.txt", getpid()));
auto file =
base::File(path, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
if (!file.IsValid()) {
PLOG(ERROR) << "Cannot open file to dump the residency data "
<< path.value();
@ -143,12 +143,13 @@ void DumpResidency(size_t start,
CHECK(AreAnchorsSane());
CHECK_LE(start, kStartOfText);
CHECK_LE(kEndOfText, end);
auto start_end = StringPrintf("%" PRIuS " %" PRIuS "\n", kStartOfText - start,
kEndOfText - start);
auto start_end = base::StringPrintf("%" PRIuS " %" PRIuS "\n",
kStartOfText - start, kEndOfText - start);
file.WriteAtCurrentPos(start_end.c_str(), static_cast<int>(start_end.size()));
for (const auto& data_point : *data) {
auto timestamp = StringPrintf("%" PRIu64 " ", data_point.timestamp_nanos);
auto timestamp =
base::StringPrintf("%" PRIu64 " ", data_point.timestamp_nanos);
file.WriteAtCurrentPos(timestamp.c_str(),
static_cast<int>(timestamp.size()));
@ -284,8 +285,9 @@ int NativeLibraryPrefetcher::PercentageOfResidentCode(size_t start,
if (!ok)
return -1;
total_pages += residency.size();
resident_pages += static_cast<size_t>(
ranges::count_if(residency, [](unsigned char x) { return x & 1; }));
resident_pages +=
static_cast<size_t>(std::count_if(residency.begin(), residency.end(),
[](unsigned char x) { return x & 1; }));
if (total_pages == 0)
return -1;
return static_cast<int>((100 * resident_pages) / total_pages);

View File

@ -180,7 +180,7 @@ __attribute__((always_inline, no_instrument_function)) void RecordAddress(
// Only the code in the native library is instrumented. Callees are expected
// to be within the native library bounds.
Disable();
ImmediateCrash();
IMMEDIATE_CRASH();
}
size_t offset = callee_address - start;

View File

@ -139,7 +139,7 @@ __attribute__((always_inline, no_instrument_function)) void RecordAddress(
// deadlock. By crashing immediately we at least have a chance to get a
// stack trace from the system to give some clue about the nature of the
// problem.
ImmediateCrash();
IMMEDIATE_CRASH();
}
// We should really crash at the first instance, but it does happen on bots,

View File

@ -32,7 +32,7 @@ class BASE_EXPORT ScopedHardwareBufferFenceSync {
// Provides a fence which is signaled when the reads for this buffer are done
// and it can be reused. Must only be called once.
virtual void SetReadFence(base::ScopedFD fence_fd) = 0;
virtual void SetReadFence(base::ScopedFD fence_fd, bool has_context) = 0;
private:
ScopedHardwareBufferHandle handle_;

View File

@ -62,31 +62,22 @@ static void JNI_TraceEvent_RegisterEnabledObserver(JNIEnv* env) {
static void JNI_TraceEvent_StartATrace(
JNIEnv* env,
const JavaParamRef<jstring>& category_filter) {
// ATrace not supported for robolectric.
#if BUILDFLAG(IS_ANDROID)
std::string category_filter_utf8 =
ConvertJavaStringToUTF8(env, category_filter);
base::trace_event::TraceLog::GetInstance()->StartATrace(category_filter_utf8);
#endif
}
static void JNI_TraceEvent_StopATrace(JNIEnv* env) {
// ATrace not supported for robolectric.
#if BUILDFLAG(IS_ANDROID)
base::trace_event::TraceLog::GetInstance()->StopATrace();
#endif
}
static void JNI_TraceEvent_SetupATraceStartupTrace(
JNIEnv* env,
const JavaParamRef<jstring>& category_filter) {
// ATrace not supported for robolectric.
#if BUILDFLAG(IS_ANDROID)
std::string category_filter_utf8 =
ConvertJavaStringToUTF8(env, category_filter);
base::trace_event::TraceLog::GetInstance()->SetupATraceStartupTrace(
category_filter_utf8);
#endif
}
static jboolean JNI_TraceEvent_ViewHierarchyDumpEnabled(JNIEnv* env) {

View File

@ -73,7 +73,7 @@ class BASE_EXPORT AtExitManager {
#endif
// Stack of managers to allow shadowing.
const raw_ptr<AtExitManager, DanglingUntriaged> next_manager_;
const raw_ptr<AtExitManager> next_manager_;
};
#if defined(UNIT_TEST)

View File

@ -24,23 +24,6 @@
namespace {
// Returns true if the module for |address| is found. |path| will contain
// the path to the module. Note that |path| may not be absolute.
[[nodiscard]] bool GetModulePathForAddress(base::FilePath* path,
const void* address);
bool GetModulePathForAddress(base::FilePath* path, const void* address) {
Dl_info info;
if (dladdr(address, &info) == 0)
return false;
*path = base::FilePath(info.dli_fname);
return true;
}
} // namespace
namespace base {
void GetNSExecutablePath(base::FilePath* path) {
DCHECK(path);
// Executable path can have relative references ("..") depending on
@ -59,10 +42,27 @@ void GetNSExecutablePath(base::FilePath* path) {
// paths such as DIR_SRC_TEST_DATA_ROOT can work, since we expect absolute
// paths to be returned here.
// TODO(bauerb): http://crbug.com/259796, http://crbug.com/373477
base::ScopedAllowBlocking allow_blocking;
base::ThreadRestrictions::ScopedAllowIO allow_io;
*path = base::MakeAbsoluteFilePath(base::FilePath(executable_path));
}
// Returns true if the module for |address| is found. |path| will contain
// the path to the module. Note that |path| may not be absolute.
[[nodiscard]] bool GetModulePathForAddress(base::FilePath* path,
const void* address);
bool GetModulePathForAddress(base::FilePath* path, const void* address) {
Dl_info info;
if (dladdr(address, &info) == 0)
return false;
*path = base::FilePath(info.dli_fname);
return true;
}
} // namespace
namespace base {
bool PathProviderMac(int key, base::FilePath* result) {
switch (key) {
case base::FILE_EXE:

View File

@ -138,8 +138,8 @@ class BASE_EXPORT BigEndianWriter {
bool Write(T v);
// TODO(crbug.com/1298696): Breaks net_unittests.
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> ptr_;
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> end_;
raw_ptr<char, DegradeToNoOpWhenMTE> ptr_;
raw_ptr<char, DegradeToNoOpWhenMTE> end_;
};
} // namespace base

View File

@ -9,6 +9,7 @@
#include "base/debug/debugging_buildflags.h"
#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"

View File

@ -51,6 +51,11 @@ class VoidifyStream {
void operator&(std::ostream&) {}
};
// Helper macro which avoids evaluating the arguments to a stream if the
// condition is false.
#define LAZY_CHECK_STREAM(stream, condition) \
!(condition) ? (void)0 : ::logging::VoidifyStream() & (stream)
// Macro which uses but does not evaluate expr and any stream parameters.
#define EAT_CHECK_STREAM_PARAMS(expr) \
true ? (void)0 \
@ -88,44 +93,18 @@ class BASE_EXPORT CheckError {
CheckError(const CheckError&) = delete;
CheckError& operator=(const CheckError&) = delete;
template <typename T>
std::ostream& operator<<(T&& streamed_type) {
return stream() << streamed_type;
}
private:
explicit CheckError(LogMessage* log_message);
LogMessage* const log_message_;
};
// The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as:
// if (a == 1)
// CHECK(Foo());
//
// TODO(crbug.com/1380930): Remove the const bool when the blink-gc plugin has
// been updated to accept `if (LIKELY(!field_))` as well as `if (!field_)`.
#define CHECK_FUNCTION_IMPL(check_failure_invocation, condition) \
switch (0) \
case 0: \
default: \
if (const bool checky_bool_lol = static_cast<bool>(condition); \
LIKELY(ANALYZER_ASSUME_TRUE(checky_bool_lol))) \
; \
else \
check_failure_invocation
#define CHECK_FUNCTION_IMPL(check_function, condition) \
LAZY_CHECK_STREAM(check_function(__FILE__, __LINE__, #condition).stream(), \
!ANALYZER_ASSUME_TRUE(condition))
#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#error "Debug builds are not expected to be optimized as official builds."
#endif // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
#if defined(OFFICIAL_BUILD) && !DCHECK_IS_ON()
// Note that this uses IMMEDIATE_CRASH_ALWAYS_INLINE to force-inline in debug
// mode as well. See LoggingTest.CheckCausesDistinctBreakpoints.
[[noreturn]] IMMEDIATE_CRASH_ALWAYS_INLINE void CheckFailure() {
base::ImmediateCrash();
}
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(DCHECK_IS_CONFIGURABLE)
// Discard log strings to reduce code bloat.
//
@ -133,40 +112,33 @@ class BASE_EXPORT CheckError {
// calling an out-of-line function instead of a noreturn inline macro prevents
// compiler optimizations.
#define CHECK(condition) \
UNLIKELY(!(condition)) ? logging::CheckFailure() : EAT_CHECK_STREAM_PARAMS()
UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
#define CHECK_WILL_STREAM() false
// Strip the conditional string from official builds.
#define PCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck(__FILE__, __LINE__), \
condition)
#define PCHECK(condition) \
LAZY_CHECK_STREAM( \
::logging::CheckError::PCheck(__FILE__, __LINE__).stream(), \
UNLIKELY(!(condition)))
#else
#define CHECK_WILL_STREAM() true
#define CHECK(condition) \
CHECK_FUNCTION_IMPL( \
::logging::CheckError::Check(__FILE__, __LINE__, #condition), condition)
CHECK_FUNCTION_IMPL(::logging::CheckError::Check, condition)
#define PCHECK(condition) \
CHECK_FUNCTION_IMPL( \
::logging::CheckError::PCheck(__FILE__, __LINE__, #condition), \
condition)
#define PCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck, condition)
#endif
#if DCHECK_IS_ON()
#define DCHECK(condition) \
CHECK_FUNCTION_IMPL( \
::logging::CheckError::DCheck(__FILE__, __LINE__, #condition), \
condition)
#define DPCHECK(condition) \
CHECK_FUNCTION_IMPL( \
::logging::CheckError::DPCheck(__FILE__, __LINE__, #condition), \
condition)
#define DCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::DCheck, condition)
#define DPCHECK(condition) \
CHECK_FUNCTION_IMPL(::logging::CheckError::DPCheck, condition)
#else
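Editor's note: the `switch (0) case 0: default:` idiom in CHECK_FUNCTION_IMPL above is worth a sketch. With a naive `if`-based macro, a caller's `else` silently binds to the macro's hidden `if`; wrapping the expansion in a degenerate `switch` turns it into a single statement. Hypothetical macros and a hypothetical HandleFailure() for illustration:

#define NAIVE_CHECK(cond) \
  if (!(cond))            \
    HandleFailure()

// Misparse: the caller's `else` binds to the macro's hidden `if`:
//   if (a == 1)
//     NAIVE_CHECK(Foo());
//   else
//     Bar();  // Runs when a == 1 && Foo() succeeded - never when a != 1.

#define SAFE_CHECK(cond) \
  switch (0)             \
  case 0:                \
  default:               \
    if (!(cond))         \
      HandleFailure()

// The SAFE_CHECK expansion is one complete statement, so the caller's
// `else` correctly binds to the outer `if (a == 1)` again.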

View File

@ -18,7 +18,7 @@
// See https://crbug.com/672699.
#define BLINK_RELEASE_ASSERT_EQUIVALENT(assertion) \
(UNLIKELY(!(assertion)) ? (base::ImmediateCrash()) : (void)0)
(UNLIKELY(!(assertion)) ? (IMMEDIATE_CRASH()) : (void)0)
void DoCheck(bool b) {
CHECK(b) << "DoCheck " << b;

View File

@ -18,19 +18,21 @@ void check_is_test_impl() {
} // namespace base::internal
namespace base::test {
// base/test/allow_check_is_test_for_testing.h declares
// `AllowCheckIsTestForTesting`, but is only allowed to be included in test
// code. We therefore have to also mark the symbol as exported here.
BASE_EXPORT void AllowCheckIsTestForTesting() {
// This CHECK ensures that `AllowCheckIsTestForTesting` is called
// just once. Since it is called in `base::TestSuite`, this should effectively
// prevent calls to `AllowCheckIsTestForTesting` in production code
// (assuming that code has unit test coverage).
// base/test/allow_check_is_test_to_be_called.h declares
// `AllowCheckIsTestToBeCalled`, but is only allowed to be included in test
// code.
// We therefore have to also mark the symbol as exported here.
BASE_EXPORT void AllowCheckIsTestToBeCalled() {
LOG(WARNING) << "Allowing special test code paths";
// This CHECK ensures that `AllowCheckIsTestToBeCalled` is called just once.
// Since it is called in `base::TestSuite`, this should effectively prevent
// calls to AllowCheckIsTestToBeCalled in production code (assuming that code
// has unit test coverage).
//
// This is just in case someone ignores the fact that this function is in the
// `base::test` namespace and ends in "ForTesting".
CHECK(!g_this_is_a_test)
<< "AllowCheckIsTestForTesting must not be called more than once";
<< "AllowCheckIsTestToBeCalled must not be called more than once";
g_this_is_a_test = true;
}

View File

@ -140,22 +140,6 @@ class CheckOpResult {
char* message_ = nullptr;
};
// Helper macro for binary operators.
// The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as:
// if (a == 1)
// CHECK_EQ(2, a);
#define CHECK_OP_FUNCTION_IMPL(check_function, name, op, val1, val2) \
switch (0) \
case 0: \
default: \
if (::logging::CheckOpResult true_if_passed = \
::logging::Check##name##Impl((val1), (val2), \
#val1 " " #op " " #val2)) \
; \
else \
check_function(__FILE__, __LINE__, &true_if_passed)
#if !CHECK_WILL_STREAM()
// Discard log strings to reduce code bloat.
@ -163,8 +147,22 @@ class CheckOpResult {
#else
#define CHECK_OP(name, op, val1, val2) \
CHECK_OP_FUNCTION_IMPL(::logging::CheckError::CheckOp, name, op, val1, val2)
// Helper macro for binary operators.
// The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as:
// if (a == 1)
// CHECK_EQ(2, a);
#define CHECK_OP(name, op, val1, val2) \
switch (0) \
case 0: \
default: \
if (::logging::CheckOpResult true_if_passed = \
::logging::Check##name##Impl((val1), (val2), \
#val1 " " #op " " #val2)) \
; \
else \
::logging::CheckError::CheckOp(__FILE__, __LINE__, &true_if_passed) \
.stream()
#endif
@ -212,8 +210,17 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#if DCHECK_IS_ON()
#define DCHECK_OP(name, op, val1, val2) \
CHECK_OP_FUNCTION_IMPL(::logging::CheckError::DCheckOp, name, op, val1, val2)
#define DCHECK_OP(name, op, val1, val2) \
switch (0) \
case 0: \
default: \
if (::logging::CheckOpResult true_if_passed = \
::logging::Check##name##Impl((val1), (val2), \
#val1 " " #op " " #val2)) \
; \
else \
::logging::CheckError::DCheckOp(__FILE__, __LINE__, &true_if_passed) \
.stream()
#else

View File

@ -29,16 +29,11 @@ Google naming. Be sure to use the base namespace.
### Usage advice
* Do not use `base::flat_map` or `base::flat_set` if the number of items will
be large or unbounded and elements will be inserted/deleted outside of the
container's constructor/destructor - they have O(n) performance on inserts
and deletes of individual items.
* Do not default to using `std::unordered_set` and `std::unordered_map`. In
the common case, query performance is unlikely to be sufficiently higher
than `std::map` to make a difference, insert performance is slightly worse,
and the memory overhead is high. This makes sense mostly for large tables
where you expect a lot of lookups.
* Generally avoid `std::unordered_set` and `std::unordered_map`. In the common
case, query performance is unlikely to be sufficiently higher than
`std::map` to make a difference, insert performance is slightly worse, and
the memory overhead is high. This makes sense mostly for large tables where
you expect a lot of lookups.
* Most maps and sets in Chrome are small and contain objects that can be moved
efficiently. In this case, consider `base::flat_map` and `base::flat_set`.
@ -70,12 +65,12 @@ Google naming. Be sure to use the base namespace.
Sizes are on 64-bit platforms. Stable iterators aren't invalidated when the
container is mutated.
| Container | Empty size | Per-item overhead | Stable iterators? | Insert/delete complexity |
|:------------------------------------------ |:--------------------- |:----------------- |:----------------- |:-----------------------------|
| `std::map`, `std::set` | 16 bytes | 32 bytes | Yes | O(log n) |
| `std::unordered_map`, `std::unordered_set` | 128 bytes | 16 - 24 bytes | No | O(1) |
| `base::flat_map`, `base::flat_set` | 24 bytes | 0 (see notes) | No | O(n) |
| `base::small_map` | 24 bytes (see notes) | 32 bytes | No | depends on fallback map type |
| Container | Empty size | Per-item overhead | Stable iterators? |
|:------------------------------------------ |:--------------------- |:----------------- |:----------------- |
| `std::map`, `std::set` | 16 bytes | 32 bytes | Yes |
| `std::unordered_map`, `std::unordered_set` | 128 bytes | 16 - 24 bytes | No |
| `base::flat_map`, `base::flat_set` | 24 bytes | 0 (see notes) | No |
| `base::small_map` | 24 bytes (see notes) | 32 bytes | No |
**Takeaways:** `std::unordered_map` and `std::unordered_set` have high
overhead for small container sizes, so prefer these only for larger workloads.

View File

@ -10,8 +10,6 @@
#include <iterator>
#include <utility>
#include "base/memory/raw_ref.h"
namespace base {
namespace internal {
@ -26,15 +24,11 @@ class ReversedAdapter {
ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
ReversedAdapter& operator=(const ReversedAdapter&) = delete;
Iterator begin() const { return std::rbegin(*t_); }
Iterator end() const { return std::rend(*t_); }
Iterator begin() const { return std::rbegin(t_); }
Iterator end() const { return std::rend(t_); }
private:
// `ReversedAdapter` and therefore `t_` are only used inside for loops. The
// container being iterated over should be the one holding a raw_ref/raw_ptr
// ideally. This member's type was rewritten into `const raw_ref` since it
// didn't hurt binary size at the time of the rewrite.
const raw_ref<T> t_;
T& t_;
};
} // namespace internal
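Editor's note: this adapter is what backs base::Reversed() in base/containers/adapters.h. A short usage sketch, assuming that public entry point:

#include <cstdio>
#include <vector>

#include "base/containers/adapters.h"

void PrintBackwards(const std::vector<int>& values) {
  // base::Reversed() returns a lightweight view over the container; the
  // container must outlive the loop, which is why the adapter stores only
  // a reference to it rather than a copy.
  for (int value : base::Reversed(values)) {
    std::printf("%d\n", value);
  }
}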

View File

@ -5,14 +5,11 @@
#ifndef BASE_CONTAINERS_BUFFER_ITERATOR_H_
#define BASE_CONTAINERS_BUFFER_ITERATOR_H_
#include <string.h>
#include <type_traits>
#include "base/bit_cast.h"
#include "base/containers/span.h"
#include "base/numerics/checked_math.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace base {
@ -73,12 +70,16 @@ class BufferIterator {
// position. On success, the iterator position is advanced by sizeof(T). If
// there are not sizeof(T) bytes remaining in the buffer, returns nullptr.
template <typename T,
typename = std::enable_if_t<std::is_trivially_copyable_v<T>>>
typename =
typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
T* MutableObject() {
size_t size = sizeof(T);
if (size > remaining_.size())
size_t next_position;
if (!CheckAdd(position(), size).AssignIfValid(&next_position))
return nullptr;
T* t = reinterpret_cast<T*>(remaining_.data());
if (next_position > total_size())
return nullptr;
T* t = bit_cast<T*>(remaining_.data());
remaining_ = remaining_.subspan(size);
return t;
}
@ -86,37 +87,29 @@ class BufferIterator {
// Returns a const pointer to an object of type T in the buffer at the current
// position.
template <typename T,
typename = std::enable_if_t<std::is_trivially_copyable_v<T>>>
typename =
typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
const T* Object() {
return MutableObject<const T>();
}
// Copies out an object. As compared to using Object, this avoids potential
// unaligned access which may be undefined behavior.
template <typename T,
typename = std::enable_if_t<std::is_trivially_copyable_v<T>>>
absl::optional<T> CopyObject() {
absl::optional<T> t;
if (remaining_.size() >= sizeof(T)) {
memcpy(&t.emplace(), remaining_.data(), sizeof(T));
remaining_ = remaining_.subspan(sizeof(T));
}
return t;
}
// Returns a span of |count| T objects in the buffer at the current position.
// On success, the iterator position is advanced by |sizeof(T) * count|. If
// there are not enough bytes remaining in the buffer to fulfill the request,
// returns an empty span.
template <typename T,
typename = std::enable_if_t<std::is_trivially_copyable_v<T>>>
typename =
typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
span<T> MutableSpan(size_t count) {
size_t size;
if (!CheckMul(sizeof(T), count).AssignIfValid(&size))
return span<T>();
if (size > remaining_.size())
size_t next_position;
if (!CheckAdd(position(), size).AssignIfValid(&next_position))
return span<T>();
auto result = span<T>(reinterpret_cast<T*>(remaining_.data()), count);
if (next_position > total_size())
return span<T>();
auto result = span<T>(bit_cast<T*>(remaining_.data()), count);
remaining_ = remaining_.subspan(size);
return result;
}
@ -124,7 +117,8 @@ class BufferIterator {
// Returns a span to |count| const objects of type T in the buffer at the
// current position.
template <typename T,
typename = std::enable_if_t<std::is_trivially_copyable_v<T>>>
typename =
typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
span<const T> Span(size_t count) {
return MutableSpan<const T>(count);
}
@ -132,19 +126,11 @@ class BufferIterator {
// Resets the iterator position to the absolute offset |to|.
void Seek(size_t to) { remaining_ = buffer_.subspan(to); }
// Limits the remaining data to the specified size.
// Seeking to an absolute offset reverses this.
void TruncateTo(size_t size) { remaining_ = remaining_.first(size); }
// Returns the total size of the underlying buffer.
size_t total_size() const { return buffer_.size(); }
size_t total_size() { return buffer_.size(); }
// Returns the current position in the buffer.
size_t position() const {
DCHECK(buffer_.data() <= remaining_.data());
DCHECK(remaining_.data() <= buffer_.data() + buffer_.size());
return static_cast<size_t>(remaining_.data() - buffer_.data());
}
size_t position() { return buffer_.size_bytes() - remaining_.size_bytes(); }
private:
// The original buffer that the iterator was constructed with.
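Editor's note: a usage sketch for the accessors above, with a hypothetical Header type. Every accessor re-checks the remaining bytes, so truncated input yields nullptr or an empty span rather than an out-of-bounds read:

#include <cstdint>

#include "base/containers/buffer_iterator.h"
#include "base/containers/span.h"

struct Header {
  uint32_t magic;
  uint32_t payload_size;
};

bool ParseMessage(base::span<const uint8_t> bytes) {
  base::BufferIterator<const uint8_t> iterator(bytes);
  // Object<T>() returns nullptr if fewer than sizeof(T) bytes remain.
  const Header* header = iterator.Object<Header>();
  if (!header)
    return false;
  // Span<T>(count) returns an empty span if the buffer is too short.
  base::span<const uint8_t> payload =
      iterator.Span<uint8_t>(header->payload_size);
  return payload.size() == header->payload_size;
}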

View File

@ -263,11 +263,6 @@ class EnumSet {
return (enums_ & other.enums_) == other.enums_;
}
// Returns true if the given set contains any value of our set.
bool HasAny(EnumSet other) const {
return (enums_ & other.enums_).count() > 0;
}
// Returns true iff our set is empty.
bool Empty() const { return !enums_.any(); }
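Editor's note: HasAny() is a plain intersection test on the underlying bitset, as opposed to Has(), which tests a single value. A sketch with a hypothetical enum, assuming EnumSet's initializer-list constructor:

#include "base/containers/enum_set.h"

enum class Permission { kRead, kWrite, kExec, kMaxValue = kExec };
using PermissionSet =
    base::EnumSet<Permission, Permission::kRead, Permission::kExec>;

bool MayModify(PermissionSet granted) {
  // True if the intersection with {kWrite, kExec} is non-empty, i.e. at
  // least one of the two permissions is granted.
  return granted.HasAny({Permission::kWrite, Permission::kExec});
}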

View File

@ -344,7 +344,6 @@ class flat_tree {
template <typename DummyT = void>
iterator erase(const_iterator position);
iterator erase(const_iterator first, const_iterator last);
size_type erase(const Key& key);
template <typename K>
size_type erase(const K& key);
@ -359,39 +358,33 @@ class flat_tree {
//
// Search operations have O(log(size)) complexity.
size_type count(const Key& key) const;
template <typename K>
size_type count(const K& key) const;
iterator find(const Key& key);
const_iterator find(const Key& key) const;
template <typename K>
iterator find(const K& key);
template <typename K>
const_iterator find(const K& key) const;
bool contains(const Key& key) const;
template <typename K>
bool contains(const K& key) const;
std::pair<iterator, iterator> equal_range(const Key& key);
std::pair<const_iterator, const_iterator> equal_range(const Key& key) const;
template <typename K>
std::pair<iterator, iterator> equal_range(const K& key);
template <typename K>
std::pair<const_iterator, const_iterator> equal_range(const K& key) const;
iterator lower_bound(const Key& key);
const_iterator lower_bound(const Key& key) const;
template <typename K>
iterator lower_bound(const K& key);
template <typename K>
const_iterator lower_bound(const K& key) const;
iterator upper_bound(const Key& key);
const_iterator upper_bound(const Key& key) const;
template <typename K>
iterator upper_bound(const K& key);
template <typename K>
const_iterator upper_bound(const K& key) const;
@ -476,10 +469,7 @@ class flat_tree {
const K& extract_if_value_type(const K& k) const {
return k;
}
// This field was not rewritten into `const raw_ref<const key_compare>` due
// to binary size increase. There's also little value to rewriting this
// member as it points to `flat_tree::comp_`. The flat_tree itself should be
// holding raw_ptr/raw_ref if necessary.
const key_compare& comp_;
};
@ -910,16 +900,6 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::erase(
return body_.erase(position);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::erase(
const Key& val) -> size_type {
auto eq_range = equal_range(val);
auto res =
static_cast<size_type>(std::distance(eq_range.first, eq_range.second));
erase(eq_range.first, eq_range.second);
return res;
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::erase(const K& val)
@ -966,26 +946,6 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::count(
return static_cast<size_type>(std::distance(eq_range.first, eq_range.second));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::count(
const Key& key) const -> size_type {
auto eq_range = equal_range(key);
return static_cast<size_type>(std::distance(eq_range.first, eq_range.second));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(
const Key& key) -> iterator {
return const_cast_it(std::as_const(*this).find(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(
const Key& key) const -> const_iterator {
auto eq_range = equal_range(key);
return (eq_range.first == eq_range.second) ? end() : eq_range.first;
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(const K& key)
@ -1001,13 +961,6 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::find(
return (eq_range.first == eq_range.second) ? end() : eq_range.first;
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
bool flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::contains(
const Key& key) const {
auto lower = lower_bound(key);
return lower != end() && !comp_(key, GetKeyFromValue()(*lower));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
bool flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::contains(
@ -1016,25 +969,6 @@ bool flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::contains(
return lower != end() && !comp_(key, GetKeyFromValue()(*lower));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
const Key& key) -> std::pair<iterator, iterator> {
auto res = std::as_const(*this).equal_range(key);
return {const_cast_it(res.first), const_cast_it(res.second)};
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
const Key& key) const -> std::pair<const_iterator, const_iterator> {
auto lower = lower_bound(key);
KeyValueCompare comp(comp_);
if (lower == end() || comp(key, *lower))
return {lower, lower};
return {lower, std::next(lower)};
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
@ -1056,19 +990,6 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::equal_range(
return {lower, std::next(lower)};
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const Key& key) -> iterator {
return const_cast_it(std::as_const(*this).lower_bound(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const Key& key) const -> const_iterator {
KeyValueCompare comp(comp_);
return ranges::lower_bound(*this, key, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
@ -1090,19 +1011,6 @@ auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
return ranges::lower_bound(*this, key_ref, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const Key& key) -> iterator {
return const_cast_it(std::as_const(*this).upper_bound(key));
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const Key& key) const -> const_iterator {
KeyValueCompare comp(comp_);
return ranges::upper_bound(*this, key, comp);
}
template <class Key, class GetKeyFromValue, class KeyCompare, class Container>
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
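Editor's note: the surviving template<typename K> overloads enable heterogeneous lookup: with a transparent comparator, callers can query with a key-like type without materializing a Key. A sketch, assuming base::flat_map's default std::less<> comparator:

#include <string>

#include "base/containers/flat_map.h"
#include "base/strings/string_piece.h"

int CountFor(const base::flat_map<std::string, int>& counts,
             base::StringPiece name) {
  // find() compares `name` directly against the stored std::string keys,
  // so no temporary std::string is allocated for the lookup.
  auto it = counts.find(name);
  return it == counts.end() ? 0 : it->second;
}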

View File

@ -8,7 +8,6 @@
#include <stddef.h>
#include "base/check.h"
#include "base/memory/raw_ref.h"
namespace base {
@ -65,8 +64,8 @@ class RingBuffer {
public:
size_t index() const { return index_; }
const T* operator->() const { return &buffer_->ReadBuffer(index_); }
const T* operator*() const { return &buffer_->ReadBuffer(index_); }
const T* operator->() const { return &buffer_.ReadBuffer(index_); }
const T* operator*() const { return &buffer_.ReadBuffer(index_); }
Iterator& operator++() {
index_++;
@ -83,14 +82,14 @@ class RingBuffer {
}
operator bool() const {
return !out_of_range_ && buffer_->IsFilledIndex(index_);
return !out_of_range_ && buffer_.IsFilledIndex(index_);
}
private:
Iterator(const RingBuffer<T, kSize>& buffer, size_t index)
: buffer_(buffer), index_(index), out_of_range_(false) {}
const raw_ref<const RingBuffer<T, kSize>> buffer_;
const RingBuffer<T, kSize>& buffer_;
size_t index_;
bool out_of_range_;

View File

@ -17,15 +17,27 @@
#include "base/no_destructor.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
BUILDFLAG(IS_AIX)
#include "base/containers/flat_set.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/notreached.h"
#include "base/process/internal_linux.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_restrictions.h"
#endif
#if defined(ARCH_CPU_ARM_FAMILY) && \
(BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
#include <asm/hwcap.h>
#include <sys/auxv.h>
#include "base/files/file_util.h"
#include "base/numerics/checked_math.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@ -378,6 +390,267 @@ CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
}
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
BUILDFLAG(IS_AIX)
namespace {
constexpr char kTimeInStatePath[] =
"/sys/devices/system/cpu/cpu%" PRIuS "/cpufreq/stats/time_in_state";
constexpr char kPhysicalPackageIdPath[] =
"/sys/devices/system/cpu/cpu%" PRIuS "/topology/physical_package_id";
constexpr char kCoreIdleStateTimePath[] =
"/sys/devices/system/cpu/cpu%" PRIuS "/cpuidle/state%d/time";
bool SupportsTimeInState() {
// Reading from time_in_state doesn't block (it amounts to reading a struct
// from the cpufreq-stats kernel driver).
ThreadRestrictions::ScopedAllowIO allow_io;
// Check if the time_in_state path for the first core is readable.
FilePath time_in_state_path(
StringPrintf(kTimeInStatePath, /*core_index=*/size_t{0}));
ScopedFILE file_stream(OpenFile(time_in_state_path, "rb"));
return static_cast<bool>(file_stream);
}
bool ParseTimeInState(const std::string& content,
CPU::CoreType core_type,
size_t core_index,
CPU::TimeInState& time_in_state) {
const char* begin = content.data();
size_t max_pos = content.size() - 1;
// Example time_in_state content:
// ---
// 300000 1
// 403200 0
// 499200 15
// ---
// Iterate over the individual lines.
for (size_t pos = 0; pos <= max_pos;) {
int num_chars = 0;
// Each line should have two integer fields, frequency (kHz) and time (in
// jiffies), separated by a space, e.g. "2419200 132".
uint64_t frequency;
int64_t time;
int matches = sscanf(begin + pos, "%" PRIu64 " %" PRId64 "\n%n", &frequency,
&time, &num_chars);
if (matches != 2)
return false;
// Skip zero-valued entries in the output list (no time spent at this
// frequency).
if (time > 0) {
time_in_state.push_back({core_type, core_index, frequency,
internal::ClockTicksToTimeDelta(time)});
}
// Advance line.
DCHECK_GT(num_chars, 0);
pos += static_cast<size_t>(num_chars);
}
return true;
}
bool SupportsCoreIdleTimes() {
// Reading from the cpuidle driver doesn't block.
ThreadRestrictions::ScopedAllowIO allow_io;
// Check if the path for the idle time in state 0 for core 0 is readable.
FilePath idle_state0_path(StringPrintf(
kCoreIdleStateTimePath, /*core_index=*/size_t{0}, /*idle_state=*/0));
ScopedFILE file_stream(OpenFile(idle_state0_path, "rb"));
return static_cast<bool>(file_stream);
}
std::vector<CPU::CoreType> GuessCoreTypes() {
// Try to guess the CPU architecture and cores of each cluster by comparing
// the maximum frequencies of the available (online and offline) cores.
const char kCPUMaxFreqPath[] =
"/sys/devices/system/cpu/cpu%" PRIuS "/cpufreq/cpuinfo_max_freq";
size_t num_cpus = static_cast<size_t>(SysInfo::NumberOfProcessors());
std::vector<CPU::CoreType> core_index_to_type(num_cpus,
CPU::CoreType::kUnknown);
std::vector<uint32_t> max_core_frequencies_mhz(num_cpus, 0);
flat_set<uint32_t> frequencies_mhz;
{
// Reading from cpuinfo_max_freq doesn't block (it amounts to reading a
// struct field from the cpufreq kernel driver).
ThreadRestrictions::ScopedAllowIO allow_io;
for (size_t core_index = 0; core_index < num_cpus; ++core_index) {
std::string content;
uint32_t frequency_khz = 0;
auto path = StringPrintf(kCPUMaxFreqPath, core_index);
if (ReadFileToString(FilePath(path), &content))
StringToUint(content, &frequency_khz);
uint32_t frequency_mhz = frequency_khz / 1000;
max_core_frequencies_mhz[core_index] = frequency_mhz;
if (frequency_mhz > 0)
frequencies_mhz.insert(frequency_mhz);
}
}
size_t num_frequencies = frequencies_mhz.size();
for (size_t core_index = 0; core_index < num_cpus; ++core_index) {
uint32_t core_frequency_mhz = max_core_frequencies_mhz[core_index];
CPU::CoreType core_type = CPU::CoreType::kOther;
if (num_frequencies == 1u) {
core_type = CPU::CoreType::kSymmetric;
} else if (num_frequencies == 2u || num_frequencies == 3u) {
auto it = frequencies_mhz.find(core_frequency_mhz);
if (it != frequencies_mhz.end()) {
// flat_set is sorted.
ptrdiff_t frequency_index = it - frequencies_mhz.begin();
switch (frequency_index) {
case 0:
core_type = num_frequencies == 2u
? CPU::CoreType::kBigLittle_Little
: CPU::CoreType::kBigLittleBigger_Little;
break;
case 1:
core_type = num_frequencies == 2u
? CPU::CoreType::kBigLittle_Big
: CPU::CoreType::kBigLittleBigger_Big;
break;
case 2:
DCHECK_EQ(num_frequencies, 3u);
core_type = CPU::CoreType::kBigLittleBigger_Bigger;
break;
default:
NOTREACHED();
break;
}
}
}
core_index_to_type[core_index] = core_type;
}
return core_index_to_type;
}
} // namespace
// static
const std::vector<CPU::CoreType>& CPU::GetGuessedCoreTypes() {
static NoDestructor<std::vector<CoreType>> kCoreTypes(GuessCoreTypes());
return *kCoreTypes.get();
}
// static
bool CPU::GetTimeInState(TimeInState& time_in_state) {
time_in_state.clear();
// The kernel may not support the cpufreq-stats driver.
static const bool kSupportsTimeInState = SupportsTimeInState();
if (!kSupportsTimeInState)
return false;
static const std::vector<CoreType>& kCoreTypes = GetGuessedCoreTypes();
// time_in_state is reported per cluster. Identify the first cores of each
// cluster.
static NoDestructor<std::vector<size_t>> kFirstCoresIndexes([]() {
std::vector<size_t> first_cores;
int last_core_package_id = 0;
for (size_t core_index = 0;
core_index < static_cast<size_t>(SysInfo::NumberOfProcessors());
core_index++) {
// Reading from physical_package_id doesn't block (it amounts to reading a
// struct field from the kernel).
ThreadRestrictions::ScopedAllowIO allow_io;
FilePath package_id_path(
StringPrintf(kPhysicalPackageIdPath, core_index));
std::string package_id_str;
if (!ReadFileToString(package_id_path, &package_id_str))
return std::vector<size_t>();
int package_id;
base::StringPiece trimmed = base::TrimWhitespaceASCII(
package_id_str, base::TrimPositions::TRIM_ALL);
if (!base::StringToInt(trimmed, &package_id))
return std::vector<size_t>();
if (last_core_package_id != package_id || core_index == 0)
first_cores.push_back(core_index);
last_core_package_id = package_id;
}
return first_cores;
}());
if (kFirstCoresIndexes->empty())
return false;
// Reading from time_in_state doesn't block (it amounts to reading a struct
// from the cpufreq-stats kernel driver).
ThreadRestrictions::ScopedAllowIO allow_io;
// Read the time_in_state for each cluster from the /sys directory of the
// cluster's first core.
for (size_t cluster_core_index : *kFirstCoresIndexes) {
FilePath time_in_state_path(
StringPrintf(kTimeInStatePath, cluster_core_index));
std::string buffer;
if (!ReadFileToString(time_in_state_path, &buffer))
return false;
if (!ParseTimeInState(buffer, kCoreTypes[cluster_core_index],
cluster_core_index, time_in_state)) {
return false;
}
}
return true;
}
// static
bool CPU::GetCumulativeCoreIdleTimes(CoreIdleTimes& idle_times) {
idle_times.clear();
// The kernel may not support the cpufreq-stats driver.
static const bool kSupportsIdleTimes = SupportsCoreIdleTimes();
if (!kSupportsIdleTimes)
return false;
// Reading from the cpuidle driver doesn't block.
ThreadRestrictions::ScopedAllowIO allow_io;
size_t num_cpus = static_cast<size_t>(SysInfo::NumberOfProcessors());
bool success = false;
for (size_t core_index = 0; core_index < num_cpus; ++core_index) {
std::string content;
TimeDelta idle_time;
// The number of idle states is system/CPU dependent, so we increment and
// try to read each state until we fail.
for (int state_index = 0;; ++state_index) {
auto path = StringPrintf(kCoreIdleStateTimePath, core_index, state_index);
uint64_t idle_state_time = 0;
if (!ReadFileToString(FilePath(path), &content))
break;
StringToUint64(content, &idle_state_time);
idle_time += Microseconds(idle_state_time);
}
idle_times.push_back(idle_time);
// At least one of the cores should have some idle time; otherwise we report
// a failure.
success |= idle_time.is_positive();
}
return success;
}
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
// BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_AIX)
const CPU& CPU::GetInstanceNoAllocation() {
static const base::NoDestructor<const CPU> cpu(CPU(false));
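Editor's note: the sscanf loop in ParseTimeInState() above is the crux of the new code. A standalone sketch of the same two-column parse, with a hypothetical FreqTime type:

#include <cinttypes>
#include <cstdio>
#include <string>
#include <vector>

struct FreqTime {
  uint64_t frequency_khz;
  int64_t time_jiffies;
};

// Parses cpufreq-stats time_in_state content, e.g. "300000 1\n403200 0\n".
// Returns false on any malformed line, mirroring ParseTimeInState() above.
bool ParseFreqTimes(const std::string& content, std::vector<FreqTime>* out) {
  for (size_t pos = 0; pos < content.size();) {
    FreqTime entry;
    int num_chars = 0;
    if (sscanf(content.c_str() + pos, "%" PRIu64 " %" PRId64 "\n%n",
               &entry.frequency_khz, &entry.time_jiffies, &num_chars) != 2) {
      return false;
    }
    out->push_back(entry);
    // %n recorded how many characters the line consumed; advance past it.
    pos += static_cast<size_t>(num_chars);
  }
  return true;
}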

View File

@ -6,8 +6,10 @@
#define BASE_CPU_H_
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
@ -112,6 +114,60 @@ class BASE_EXPORT CPU final {
#endif
const std::string& cpu_brand() const { return cpu_brand_; }
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
BUILDFLAG(IS_AIX)
enum class CoreType {
kUnknown = 0,
kOther,
kSymmetric,
kBigLittle_Little,
kBigLittle_Big,
kBigLittleBigger_Little,
kBigLittleBigger_Big,
kBigLittleBigger_Bigger,
kMaxValue = kBigLittleBigger_Bigger
};
// Attempts to guess the core types of individual CPU cores based on frequency
// information from /sys/devices/system/cpu/cpuN/cpufreq/cpuinfo_max_freq.
// Beware that it is kernel/hardware dependent whether the information from
// sys is accurate. Returns a reference to a static-storage vector (leaked on
// shutdown) with the guessed type for core N at index N.
static const std::vector<CoreType>& GetGuessedCoreTypes();
struct TimeInStateEntry {
CPU::CoreType core_type; // type of the cores in this cluster.
size_t cluster_core_index; // index of the first core in the cluster.
uint64_t core_frequency_khz;
TimeDelta cumulative_time;
};
using TimeInState = std::vector<TimeInStateEntry>;
// For each CPU core, emits the cumulative time spent in different frequency
// states into the output parameter (replacing its current contents). One
// entry in the output parameter is added for each cluster core index
// + frequency state combination with a non-zero CPU time value. Returns false
// on failure. We return the usage via an output parameter to allow reuse of
// TimeInState's std::vector by the caller, e.g. to avoid allocations between
// repeated calls to this method.
//
// NOTE: Currently only supported on Linux/Android, and only on kernels with
// cpufreq-stats driver.
static bool GetTimeInState(TimeInState&);
// For each CPU core, emits the total cumulative wall time spent in any idle
// state into the output parameter (replacing its current contents). Returns
// false on failure. We return the usage via an output parameter to allow
// reuse of TimeInState's std::vector by the caller, e.g. to avoid allocations
// between repeated calls to this method.
//
// NOTE: Currently only supported on Linux/Android, and only on kernels with
// cpuidle driver.
using CoreIdleTimes = std::vector<TimeDelta>;
static bool GetCumulativeCoreIdleTimes(CoreIdleTimes&);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
// BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_AIX)
private:
// Query the processor for CPUID information.
void Initialize(bool requires_branding);
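Editor's note: a usage sketch for the API above; the output parameter is kept alive across calls to avoid reallocation, which is the reuse the comment describes:

#include <cinttypes>
#include <cstdio>

#include "base/cpu.h"

void LogTimeInState() {
  static base::CPU::TimeInState time_in_state;  // Reused between calls.
  if (!base::CPU::GetTimeInState(time_in_state))
    return;  // e.g. a kernel without the cpufreq-stats driver.
  for (const auto& entry : time_in_state) {
    std::printf("core %zu: %" PRIu64 " kHz for %.3f s\n",
                entry.cluster_core_index, entry.core_frequency_khz,
                entry.cumulative_time.InSecondsF());
  }
}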

View File

@ -5,24 +5,10 @@
#ifndef BASE_CXX20_TO_ADDRESS_H_
#define BASE_CXX20_TO_ADDRESS_H_
#include <memory>
#include <type_traits>
namespace base {
namespace {
template <typename Ptr, typename = void>
struct has_std_to_address : std::false_type {};
template <typename Ptr>
struct has_std_to_address<
Ptr,
std::void_t<decltype(std::pointer_traits<Ptr>::to_address(
std::declval<Ptr>()))>> : std::true_type {};
} // namespace
// Implementation of C++20's std::to_address.
// Note: This does consider specializations of pointer_traits<>::to_address,
// even though it's a C++20 member function, because CheckedContiguousIterator
@ -37,12 +23,14 @@ constexpr T* to_address(T* p) noexcept {
}
template <typename Ptr>
constexpr auto to_address(const Ptr& p) noexcept {
if constexpr (has_std_to_address<Ptr>::value) {
return std::pointer_traits<Ptr>::to_address(p);
} else {
return base::to_address(p.operator->());
}
constexpr auto to_address(const Ptr& p) noexcept
-> decltype(std::pointer_traits<Ptr>::to_address(p)) {
return std::pointer_traits<Ptr>::to_address(p);
}
template <typename Ptr, typename... None>
constexpr auto to_address(const Ptr& p, None...) noexcept {
return base::to_address(p.operator->());
}
} // namespace base
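Editor's note: both variants above mirror C++20 semantics: raw pointers pass through unchanged, and fancy pointers unwrap via pointer_traits<>::to_address when a specialization provides it, else via operator->. A usage sketch:

#include <cstdio>
#include <memory>

#include "base/cxx20_to_address.h"

void Demo() {
  int value = 42;
  int* raw = &value;
  std::unique_ptr<int> owned = std::make_unique<int>(7);

  int* p1 = base::to_address(raw);    // Identity for raw pointers.
  int* p2 = base::to_address(owned);  // Unwraps via owned.operator->().
  std::printf("%d %d\n", *p1, *p2);
}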

View File

@ -4,8 +4,6 @@
#include "base/debug/crash_logging.h"
#include <ostream>
#include "base/strings/string_piece.h"
#include "build/build_config.h"

View File

@ -4,8 +4,6 @@
#include "base/debug/dwarf_line_no.h"
#include "base/memory/raw_ref.h"
#ifdef USE_SYMBOLIZE
#include <algorithm>
#include <cstdint>
@ -190,7 +188,7 @@ void EvaluateLineNumberProgram(const int fd,
private:
raw_ptr<LineNumberInfo> info;
uint64_t module_relative_pc;
const raw_ref<const ProgramInfo> program_info;
const ProgramInfo& program_info;
public:
OnCommitImpl(LineNumberInfo* info,
@ -215,7 +213,7 @@ void EvaluateLineNumberProgram(const int fd,
module_relative_pc >= registers->address)
return;
if (registers->last_file < program_info->num_filenames) {
if (registers->last_file < program_info.num_filenames) {
info->line = registers->last_line;
info->column = registers->last_column;
@ -224,19 +222,19 @@ void EvaluateLineNumberProgram(const int fd,
// follow spec, but seems to be common behavior. See the following LLVM
// bug for more info: https://reviews.llvm.org/D11003
if (registers->last_file == 0 &&
program_info->filename_offsets[0] == 0 &&
1 < program_info->num_filenames) {
program_info->filename_offsets[0] = program_info->filename_offsets[1];
program_info->filename_dirs[0] = program_info->filename_dirs[1];
program_info.filename_offsets[0] == 0 &&
1 < program_info.num_filenames) {
program_info.filename_offsets[0] = program_info.filename_offsets[1];
program_info.filename_dirs[0] = program_info.filename_dirs[1];
}
if (registers->last_file < kMaxFilenames) {
info->module_filename_offset =
program_info->filename_offsets[registers->last_file];
program_info.filename_offsets[registers->last_file];
uint8_t dir = program_info->filename_dirs[registers->last_file];
info->module_dir_offset = program_info->directory_offsets[dir];
info->dir_size = program_info->directory_sizes[dir];
uint8_t dir = program_info.filename_dirs[registers->last_file];
info->module_dir_offset = program_info.directory_offsets[dir];
info->dir_size = program_info.directory_sizes[dir];
}
}
}

View File

@ -98,7 +98,7 @@ void StackTrace::OutputToStreamWithPrefix(std::ostream* os,
// on fatal log messages in debug builds only. If the restriction is enabled
// then it will recursively trigger fatal failures when this enters on the
// UI thread.
base::ScopedAllowBlocking scoped_allow_blocking;
base::ThreadRestrictions::ScopedAllowIO allow_io;
if (!ReadProcMaps(&proc_maps)) {
__android_log_write(
ANDROID_LOG_ERROR, "chromium", "Failed to read /proc/self/maps");

View File

@ -201,22 +201,6 @@ void SymbolMap::Populate() {
valid_ = true;
}
// Returns true if |address| is contained by any of the memory regions
// mapped for |module_entry|.
bool ModuleContainsFrameAddress(void* address,
const SymbolMap::Module& module_entry) {
for (size_t i = 0; i < module_entry.segment_count; ++i) {
const SymbolMap::Segment& segment = module_entry.segments[i];
const void* segment_end = reinterpret_cast<const void*>(
reinterpret_cast<const char*>(segment.addr) + segment.size - 1);
if (address >= segment.addr && address <= segment_end) {
return true;
}
}
return false;
}
} // namespace
// static
@ -247,22 +231,12 @@ void StackTrace::OutputToStreamWithPrefix(std::ostream* os,
SymbolMap map;
int module_id = 0;
for (const SymbolMap::Module& module_entry : map.GetModules()) {
// Don't emit information on modules that aren't useful for the actual
// stack trace, so as to reduce the load on the symbolizer and syslog.
bool should_emit_module = false;
for (size_t i = 0; i < count_ && !should_emit_module; ++i) {
should_emit_module = ModuleContainsFrameAddress(trace_[i], module_entry);
}
if (!should_emit_module) {
continue;
}
for (const SymbolMap::Module& entry : map.GetModules()) {
*os << "{{{module:" << module_id << ":" << entry.name
<< ":elf:" << entry.build_id << "}}}\n";
*os << "{{{module:" << module_id << ":" << module_entry.name
<< ":elf:" << module_entry.build_id << "}}}\n";
for (size_t i = 0; i < module_entry.segment_count; ++i) {
const SymbolMap::Segment& segment = module_entry.segments[i];
for (size_t i = 0; i < entry.segment_count; ++i) {
const SymbolMap::Segment& segment = entry.segments[i];
char permission_string[4] = {};
*os << "{{{mmap:" << segment.addr << ":0x" << std::hex << segment.size

View File

@ -109,6 +109,16 @@ struct FeatureEntry {
}
};
// Some characters are not allowed to appear in feature names or the associated
// field trial names, as they are used as special characters for command-line
// serialization. This function checks that the strings are ASCII (since they
// are used in command-line API functions that require ASCII) and whether there
// are any reserved characters present, returning true if the string is valid.
// Only called in DCHECKs.
bool IsValidFeatureOrFieldTrialName(StringPiece name) {
return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
}
// Splits |text| into two parts by the |separator| where the first part will be
// returned updated in |first| and the second part will be returned as |second|.
// This function returns false if there is more than one |separator| in |first|.
@ -391,11 +401,6 @@ bool FeatureList::IsEnabled(const Feature& feature) {
return g_feature_list_instance->IsFeatureEnabled(feature);
}
// static
bool FeatureList::IsValidFeatureOrFieldTrialName(StringPiece name) {
return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
}
// static
absl::optional<bool> FeatureList::GetStateIfOverridden(const Feature& feature) {
#if DCHECK_IS_ON()

View File

@ -384,14 +384,6 @@ class BASE_EXPORT FeatureList {
// struct, which is checked in builds with DCHECKs enabled.
static bool IsEnabled(const Feature& feature);
// Some characters are not allowed to appear in feature names or the
// associated field trial names, as they are used as special characters for
// command-line serialization. This function checks that the strings are ASCII
// (since they are used in command-line API functions that require ASCII) and
// whether there are any reserved characters present, returning true if the
// string is valid.
static bool IsValidFeatureOrFieldTrialName(StringPiece name);
// If the given |feature| is overridden, returns its enabled state; otherwise,
// returns an empty optional. Must only be called after the singleton instance
// has been registered via SetInstance(). Additionally, a feature with a given

View File

@ -171,10 +171,10 @@ std::u16string FileVersionInfoWin::GetStringValue(const char16_t* name) const {
}
base::Version FileVersionInfoWin::GetFileVersion() const {
return base::Version({HIWORD(fixed_file_info_->dwFileVersionMS),
LOWORD(fixed_file_info_->dwFileVersionMS),
HIWORD(fixed_file_info_->dwFileVersionLS),
LOWORD(fixed_file_info_->dwFileVersionLS)});
return base::Version({HIWORD(fixed_file_info_.dwFileVersionMS),
LOWORD(fixed_file_info_.dwFileVersionMS),
HIWORD(fixed_file_info_.dwFileVersionLS),
LOWORD(fixed_file_info_.dwFileVersionLS)});
}
FileVersionInfoWin::FileVersionInfoWin(std::vector<uint8_t>&& data,

View File

@ -16,7 +16,6 @@
#include "base/base_export.h"
#include "base/file_version_info.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ref.h"
#include "base/version.h"
struct tagVS_FIXEDFILEINFO;
@ -72,7 +71,7 @@ class BASE_EXPORT FileVersionInfoWin : public FileVersionInfo {
const WORD code_page_;
// This is a reference for a portion of |data_|.
const raw_ref<const VS_FIXEDFILEINFO> fixed_file_info_;
const VS_FIXEDFILEINFO& fixed_file_info_;
};
#endif // BASE_FILE_VERSION_INFO_WIN_H_

View File

@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ref.h"
#include "base/message_loop/message_pump_for_io.h"
#include "base/no_destructor.h"
#include "base/synchronization/waitable_event.h"
@ -72,7 +71,7 @@ class FileDescriptorWatcher::Controller::Watcher
// WaitableEvent to signal to ensure that the Watcher is always destroyed
// before the Controller.
const raw_ref<base::WaitableEvent> on_destroyed_;
base::WaitableEvent& on_destroyed_;
// Whether this Watcher is notified when |fd_| becomes readable or writable
// without blocking.
@ -110,7 +109,7 @@ FileDescriptorWatcher::Controller::Watcher::~Watcher() {
// Stop watching the descriptor before signalling |on_destroyed_|.
CHECK(fd_watch_controller_.StopWatchingFileDescriptor());
on_destroyed_->Signal();
on_destroyed_.Signal();
}
void FileDescriptorWatcher::Controller::Watcher::StartWatching() {

View File

@ -132,14 +132,7 @@
// Macros for string literal initialization of FilePath::CharType[].
#if BUILDFLAG(IS_WIN)
// The `FILE_PATH_LITERAL_INTERNAL` indirection allows `FILE_PATH_LITERAL` to
// work correctly with macro parameters, for example
// `FILE_PATH_LITERAL(TEST_FILE)` where `TEST_FILE` is a macro #defined as
// "TestFile".
#define FILE_PATH_LITERAL_INTERNAL(x) L##x
#define FILE_PATH_LITERAL(x) FILE_PATH_LITERAL_INTERNAL(x)
#define FILE_PATH_LITERAL(x) L##x
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#define FILE_PATH_LITERAL(x) x
#endif // BUILDFLAG(IS_WIN)
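Editor's note: the removed indirection matters when the macro argument is itself a macro, because ## pastes tokens before its operands are expanded; the extra macro level forces the argument to expand first. A sketch with a hypothetical TEST_FILE macro:

#define FILE_PATH_LITERAL_INTERNAL(x) L##x
#define FILE_PATH_LITERAL(x) FILE_PATH_LITERAL_INTERNAL(x)
#define DIRECT_LITERAL(x) L##x

#define TEST_FILE "TestFile"

const wchar_t* good = FILE_PATH_LITERAL(TEST_FILE);  // Yields L"TestFile".
// DIRECT_LITERAL(TEST_FILE) would instead paste into the single
// (undeclared) identifier LTEST_FILE, because TEST_FILE never expands.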

View File

@ -20,9 +20,8 @@ FilePathWatcher::~FilePathWatcher() {
// static
bool FilePathWatcher::RecursiveWatchAvailable() {
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_AIX) || \
BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_AIX)
return true;
#else
// FSEvents isn't available on iOS.

View File

@ -119,13 +119,6 @@ class InotifyReader {
#else
using Watch = int;
#endif
// Record of watchers tracked for watch descriptors.
struct WatcherEntry {
scoped_refptr<SequencedTaskRunner> task_runner;
WeakPtr<FilePathWatcherImpl> watcher;
};
static constexpr Watch kInvalidWatch = static_cast<Watch>(-1);
static constexpr Watch kWatchLimitExceeded = static_cast<Watch>(-2);
@ -148,6 +141,12 @@ class InotifyReader {
private:
friend struct LazyInstanceTraitsBase<InotifyReader>;
// Record of watchers tracked for watch descriptors.
struct WatcherEntry {
scoped_refptr<SequencedTaskRunner> task_runner;
WeakPtr<FilePathWatcherImpl> watcher;
};
InotifyReader();
// There is no destructor because |g_inotify_reader| is a
// base::LazyInstace::Leaky object. Having a destructor causes build
@ -198,8 +197,11 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
// would exceed the limit if adding one more.
bool WouldExceedWatchLimit() const;
// Returns a WatcherEntry for this, must be called on the original sequence.
InotifyReader::WatcherEntry GetWatcherEntry();
// Returns the task runner to be used with this.
scoped_refptr<SequencedTaskRunner> GetTaskRunner() const;
// Returns the WeakPtr of this, must be called on the original sequence.
WeakPtr<FilePathWatcherImpl> GetWeakPtr() const;
private:
// Start watching |path| for changes and notify |delegate| on each change.
@ -363,7 +365,8 @@ InotifyReader::Watch InotifyReader::AddWatch(const FilePath& path,
return kInvalidWatch;
const Watch watch = static_cast<Watch>(watch_int);
watchers_[watch].emplace(std::make_pair(watcher, watcher->GetWatcherEntry()));
watchers_[watch].emplace(std::make_pair(
watcher, WatcherEntry{watcher->GetTaskRunner(), watcher->GetWeakPtr()}));
return watch;
}
@ -554,9 +557,14 @@ bool FilePathWatcherImpl::WouldExceedWatchLimit() const {
return number_of_inotify_watches >= GetMaxNumberOfInotifyWatches();
}
InotifyReader::WatcherEntry FilePathWatcherImpl::GetWatcherEntry() {
scoped_refptr<SequencedTaskRunner> FilePathWatcherImpl::GetTaskRunner() const {
DCHECK(task_runner()->RunsTasksInCurrentSequence());
return {task_runner(), weak_factory_.GetWeakPtr()};
return task_runner();
}
WeakPtr<FilePathWatcherImpl> FilePathWatcherImpl::GetWeakPtr() const {
DCHECK(task_runner()->RunsTasksInCurrentSequence());
return weak_factory_.GetWeakPtr();
}
bool FilePathWatcherImpl::Watch(const FilePath& path,

View File

@ -367,12 +367,6 @@ BASE_EXPORT ScopedFILE CreateAndOpenTemporaryStreamInDir(const FilePath& dir,
// the format of prefixyyyy.
// NOTE: prefix is ignored in the POSIX implementation.
// If success, return true and output the full path of the directory created.
//
// For Windows, this directory is usually created in a secure location under
// %ProgramFiles% if the caller is admin. This is because the default %TEMP%
// folder for Windows is insecure, since low privilege users can get the path of
// folders under %TEMP% after creation and are able to create subfolders and
// files within these folders which can lead to privilege escalation.
BASE_EXPORT bool CreateNewTempDirectory(const FilePath::StringType& prefix,
FilePath* new_temp_path);
@ -610,6 +604,26 @@ BASE_EXPORT bool VerifyPathControlledByAdmin(const base::FilePath& path);
// the directory |path|, in the number of FilePath::CharType, or -1 on failure.
BASE_EXPORT int GetMaximumPathComponentLength(const base::FilePath& path);
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_AIX)
// Broad categories of file systems as returned by statfs() on Linux.
enum FileSystemType {
FILE_SYSTEM_UNKNOWN, // statfs failed.
FILE_SYSTEM_0, // statfs.f_type == 0 means unknown, may indicate AFS.
FILE_SYSTEM_ORDINARY, // on-disk filesystem like ext2
FILE_SYSTEM_NFS,
FILE_SYSTEM_SMB,
FILE_SYSTEM_CODA,
FILE_SYSTEM_MEMORY, // in-memory file system
FILE_SYSTEM_CGROUP, // cgroup control.
FILE_SYSTEM_OTHER, // any other value.
FILE_SYSTEM_TYPE_COUNT
};
// Attempts to determine the FileSystemType for |path|.
// Returns false if |path| doesn't exist.
BASE_EXPORT bool GetFileSystemType(const FilePath& path, FileSystemType* type);
#endif
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
// Get a temporary directory for shared memory files. The directory may depend
// on whether the destination is intended for executable files, which in turn

View File

@ -0,0 +1,63 @@
// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/files/file_util.h"
#include <errno.h>
#include <linux/magic.h>
#include <sys/vfs.h>
#include "base/files/file_path.h"
namespace base {
bool GetFileSystemType(const FilePath& path, FileSystemType* type) {
struct statfs statfs_buf;
if (statfs(path.value().c_str(), &statfs_buf) < 0) {
if (errno == ENOENT)
return false;
*type = FILE_SYSTEM_UNKNOWN;
return true;
}
// Not all possible |statfs_buf.f_type| values are in linux/magic.h.
// Missing values are copied from the statfs man page.
switch (static_cast<int>(statfs_buf.f_type)) {
case 0:
*type = FILE_SYSTEM_0;
break;
case EXT2_SUPER_MAGIC: // Also ext3 and ext4
case MSDOS_SUPER_MAGIC:
case REISERFS_SUPER_MAGIC:
case static_cast<int>(BTRFS_SUPER_MAGIC):
case 0x5346544E: // NTFS
case 0x58465342: // XFS
case 0x3153464A: // JFS
*type = FILE_SYSTEM_ORDINARY;
break;
case NFS_SUPER_MAGIC:
*type = FILE_SYSTEM_NFS;
break;
case SMB_SUPER_MAGIC:
case static_cast<int>(0xFF534D42): // CIFS
*type = FILE_SYSTEM_SMB;
break;
case CODA_SUPER_MAGIC:
*type = FILE_SYSTEM_CODA;
break;
case static_cast<int>(HUGETLBFS_MAGIC):
case static_cast<int>(RAMFS_MAGIC):
case TMPFS_MAGIC:
*type = FILE_SYSTEM_MEMORY;
break;
case CGROUP_SUPER_MAGIC:
*type = FILE_SYSTEM_CGROUP;
break;
default:
*type = FILE_SYSTEM_OTHER;
}
return true;
}
} // namespace base
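Editor's note: a usage sketch for GetFileSystemType() as declared in file_util.h above; the function and warning here are hypothetical callers, not part of this diff:

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"

void WarnIfNetworkFileSystem(const base::FilePath& profile_dir) {
  base::FileSystemType type;
  if (!base::GetFileSystemType(profile_dir, &type))
    return;  // The path does not exist.
  // statfs() only reports broad categories, but NFS/SMB mounts tend to
  // have very different latency characteristics from local disks.
  if (type == base::FILE_SYSTEM_NFS || type == base::FILE_SYSTEM_SMB)
    LOG(WARNING) << "Profile is on a network file system.";
}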

View File

@ -30,7 +30,6 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/path_service.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
#include "base/strings/strcat.h"
@ -56,7 +55,6 @@ namespace {
const DWORD kFileShareAll =
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
const wchar_t kDefaultTempDirPrefix[] = L"ChromiumTemp";
// Returns the Win32 last error code or ERROR_SUCCESS if the last error code is
// ERROR_FILE_NOT_FOUND or ERROR_PATH_NOT_FOUND. This is useful in cases where
@ -622,33 +620,15 @@ bool CreateTemporaryDirInDir(const FilePath& base_dir,
return false;
}
// The directory is created under %ProgramFiles% for security reasons if the
// caller is admin. Since only admin can write to %ProgramFiles%, this avoids
// attacks from lower privilege processes.
//
// If unable to create a dir under %ProgramFiles%, the dir is created under
// %TEMP%. The reasons for not being able to create a dir under %ProgramFiles%
// could be because we are unable to resolve `DIR_PROGRAM_FILES`, say due to
// registry redirection, or unable to create a directory due to %ProgramFiles%
// being read-only or having atypical ACLs.
bool CreateNewTempDirectory(const FilePath::StringType& prefix,
FilePath* new_temp_path) {
ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
DCHECK(new_temp_path);
FilePath parent_dir;
if (::IsUserAnAdmin() && PathService::Get(DIR_PROGRAM_FILES, &parent_dir) &&
CreateTemporaryDirInDir(parent_dir,
prefix.empty() ? kDefaultTempDirPrefix : prefix,
new_temp_path)) {
return true;
}
if (!GetTempDir(&parent_dir))
FilePath system_temp_dir;
if (!GetTempDir(&system_temp_dir))
return false;
return CreateTemporaryDirInDir(parent_dir, prefix, new_temp_path);
return CreateTemporaryDirInDir(system_temp_dir, prefix, new_temp_path);
}
bool CreateDirectoryAndGetError(const FilePath& full_path,

View File

@ -29,7 +29,7 @@ std::array<std::atomic_bool, kMaxTrackedFds> g_is_fd_owned;
NOINLINE void CrashOnFdOwnershipViolation() {
RAW_LOG(ERROR, "Crashing due to FD ownership violation:\n");
base::debug::StackTrace().Print();
base::ImmediateCrash();
IMMEDIATE_CRASH();
}
bool CanTrack(int fd) {

Some files were not shown because too many files have changed in this diff.