Compare commits

..

No commits in common. "12a6ba324f8bc147f3f7ba299e9264e68d2d1a25" and "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" have entirely different histories.

1793 changed files with 28699 additions and 43316 deletions

View File

@ -1 +1 @@
111.0.5563.64 112.0.5615.49

View File

@ -126,6 +126,7 @@ Anton Obzhirov <a.obzhirov@samsung.com>
Antonin Hildebrand <antonin.hildebrand@gmail.com> Antonin Hildebrand <antonin.hildebrand@gmail.com>
Antonio Gomes <a1.gomes@sisa.samsung.com> Antonio Gomes <a1.gomes@sisa.samsung.com>
Anuj Kumar Sharma <anujk.sharma@samsung.com> Anuj Kumar Sharma <anujk.sharma@samsung.com>
Ao Hui <aohui.wan@gmail.com>
Ao Sun <ntusunao@gmail.com> Ao Sun <ntusunao@gmail.com>
Ao Wang <wangao.james@bytedance.com> Ao Wang <wangao.james@bytedance.com>
Aquibuzzaman Md. Sayem <md.sayem@samsung.com> Aquibuzzaman Md. Sayem <md.sayem@samsung.com>
@ -277,6 +278,7 @@ Daniel Waxweiler <daniel.waxweiler@gmail.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu> Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu> Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com> Daniil Suvorov <severecloud@gmail.com>
Danny Weiss <danny.weiss.fr@gmail.com>
Daoming Qiu <daoming.qiu@intel.com> Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com> Darik Harter <darik.harter@gmail.com>
Darshan Sen <raisinten@gmail.com> Darshan Sen <raisinten@gmail.com>
@ -376,6 +378,7 @@ Feifei Wang <alexswang@tencent.com>
Felipe Erias Morandeira <felipeerias@gmail.com> Felipe Erias Morandeira <felipeerias@gmail.com>
Felix H. Dahlke <fhd@ubercode.de> Felix H. Dahlke <fhd@ubercode.de>
Felix Weilbach <feweilbach@gmail.com> Felix Weilbach <feweilbach@gmail.com>
Feng Shengyuan <fengshengyuan@agora.io>
Feng Yu <f3n67u@gmail.com> Feng Yu <f3n67u@gmail.com>
Fengrong Fang <fr.fang@samsung.com> Fengrong Fang <fr.fang@samsung.com>
Fernando Jiménez Moreno <ferjmoreno@gmail.com> Fernando Jiménez Moreno <ferjmoreno@gmail.com>
@ -605,6 +608,7 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com> John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com> John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com> Johnson Lin <johnson.lin@intel.com>
Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com> Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me> Jonathan Garbee <jonathan@garbee.me>
Jonathan Hacker <jhacker@arcanefour.com> Jonathan Hacker <jhacker@arcanefour.com>
@ -640,6 +644,7 @@ Julien Isorce <j.isorce@samsung.com>
Julien Racle <jracle@logitech.com> Julien Racle <jracle@logitech.com>
Jun Fang <jun_fang@foxitsoftware.com> Jun Fang <jun_fang@foxitsoftware.com>
Jun Jiang <jun.a.jiang@intel.com> Jun Jiang <jun.a.jiang@intel.com>
Junbong Eom <jb.eom@samsung.com>
Jungchang Park <valley84265@gmail.com> Jungchang Park <valley84265@gmail.com>
Junchao Han <junchao.han@intel.com> Junchao Han <junchao.han@intel.com>
Junghoon Lee <sjh836@gmail.com> Junghoon Lee <sjh836@gmail.com>
@ -740,6 +745,7 @@ Li Yanbo <liyanbo.monster@bytedance.com>
Li Yin <li.yin@intel.com> Li Yin <li.yin@intel.com>
Lidwine Genevet <lgenevet@cisco.com> Lidwine Genevet <lgenevet@cisco.com>
Lin Sun <lin.sun@intel.com> Lin Sun <lin.sun@intel.com>
Lin Peng <penglin220@gmail.com>
Lin Peng <penglin22@huawei.com> Lin Peng <penglin22@huawei.com>
Lingqi Chi <someway.bit@gmail.com> Lingqi Chi <someway.bit@gmail.com>
Lingyun Cai <lingyun.cai@intel.com> Lingyun Cai <lingyun.cai@intel.com>
@ -762,7 +768,7 @@ Luke Seunghoe Gu <gulukesh@gmail.com>
Luke Zarko <lukezarko@gmail.com> Luke Zarko <lukezarko@gmail.com>
Luoxi Pan <l.panpax@gmail.com> Luoxi Pan <l.panpax@gmail.com>
Lu Yahan <yahan@iscas.ac.cn> Lu Yahan <yahan@iscas.ac.cn>
Ma Aiguo <maaiguo@uniontech.com> Ma Aiguo <imaiguo@gmail.com>
Maarten Lankhorst <m.b.lankhorst@gmail.com> Maarten Lankhorst <m.b.lankhorst@gmail.com>
Maciej Pawlowski <m.pawlowski@eyeo.com> Maciej Pawlowski <m.pawlowski@eyeo.com>
Magnus Danielsson <fuzzac@gmail.com> Magnus Danielsson <fuzzac@gmail.com>
@ -915,6 +921,7 @@ Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com> Nolan Cao <nolan.robin.cao@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com> Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com> Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net>
Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com> Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com>
Omar Sandoval <osandov@osandov.com> Omar Sandoval <osandov@osandov.com>
Owen Yuwono <owenyuwono@gmail.com> Owen Yuwono <owenyuwono@gmail.com>
@ -1199,6 +1206,7 @@ Suyash Nayan <suyashnyn1@gmail.com>
Suyash Sengar <suyash.s@samsung.com> Suyash Sengar <suyash.s@samsung.com>
Swarali Raut <swarali.sr@samsung.com> Swarali Raut <swarali.sr@samsung.com>
Swati Jaiswal <swa.jaiswal@samsung.com> Swati Jaiswal <swa.jaiswal@samsung.com>
Syed Wajid <syed.wajid@samsung.com>
Sylvain Zimmer <sylvinus@gmail.com> Sylvain Zimmer <sylvinus@gmail.com>
Sylvestre Ledru <sylvestre.ledru@gmail.com> Sylvestre Ledru <sylvestre.ledru@gmail.com>
Synthia Islam <synthia.is@samsung.com> Synthia Islam <synthia.is@samsung.com>

358
src/DEPS
View File

@ -229,7 +229,7 @@ vars = {
# #
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '15326.0.0', 'lacros_sdk_version': '15357.0.0',
# Generate location tag metadata to include in tests result data uploaded # Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates # to ResultDB. This isn't needed on some configs and the tool that generates
@ -241,7 +241,7 @@ vars = {
# luci-go CIPD package version. # luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder. # Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:221383f749a2c5b8587449d3d2e4982857daa9e7', 'luci_go': 'git_revision:8a8b4f2ea65c7ff5fde8a0c522008aed78d42d9d',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD # This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision. # instead of downloading the prebuilt pinned revision.
@ -286,11 +286,6 @@ vars = {
# Rust toolchain. # Rust toolchain.
'checkout_rust_toolchain_deps': False, 'checkout_rust_toolchain_deps': False,
# The Rust toolchain sources. It is a version tag from an instance of the
# CIPD `chromium/third_party/rust_src` package. This field is written by
# //tools/clang/scripts/upload_revision.py and shouldn't be updated by hand.
'rust_toolchain_version': 'version:2@2022-12-09',
'android_git': 'https://android.googlesource.com', 'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com', 'aomedia_git': 'https://aomedia.googlesource.com',
'boringssl_git': 'https://boringssl.googlesource.com', 'boringssl_git': 'https://boringssl.googlesource.com',
@ -304,34 +299,34 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia # the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other. # and whatever else without interference from each other.
'skia_revision': '59932b057f281ddaeb0926ecfac55486270f8c51', 'skia_revision': 'f5fefe5245098be43cb608eace5e14d67cdc09e6',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8 # the commit queue can handle CLs rolling V8
# and whatever else without interference from each other. # and whatever else without interference from each other.
'v8_revision': '1cee747760b14aa78503a22ba1a3ab97b968fa28', 'v8_revision': '96fed67922e5f54a027aed80259e5083769e33e2',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE # the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other. # and whatever else without interference from each other.
'angle_revision': 'cd45d155bf4cf7404061f37e974a048914ca4610', 'angle_revision': '293db5ce4d0766cb3ba7711057a00f0a5bddb00d',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader # the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other. # and whatever else without interference from each other.
'swiftshader_revision': 'aae98adc2222dcada4aa952cccad48ab08e34004', 'swiftshader_revision': '3575b5479af54e471ea6750a8585e2c9bc87801c',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium # the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other. # and whatever else without interference from each other.
'pdfium_revision': 'd087df316170b2d8757487b1015244384624478e', 'pdfium_revision': '4090d4c0f9873f5f50b630c26c2439b5297a6e49',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL # the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other. # and whatever else without interference from each other.
# #
# Note this revision should be updated with # Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep. # third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '45b8d7bbd771cbf7e116db2ba1f1cc7af959497e', 'boringssl_revision': 'ca1690e221677cea3fb946f324eb89d846ec53f2',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk # the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other. # and whatever else without interference from each other.
'fuchsia_version': 'version:11.20230126.1.1', 'fuchsia_version': 'version:11.20230223.1.1',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac # the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -351,11 +346,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl # the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other. # and whatever else without interference from each other.
'nacl_revision': '5b530a9ffd34be8541037262cf47cf3924bfc973', 'nacl_revision': '417b413dbe94a861ee050d42daf945cca02dba11',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
'freetype_revision': '82ce172669f132839debe6e50a3a53fe88db9e31', 'freetype_revision': '3f01161ff22c84c371b6dc3b5e0351e0d6e8e771',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -375,15 +370,23 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult # the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other. # and whatever else without interference from each other.
'catapult_revision': 'abaad53f0c4e104ab630b314af2902ad83b82c8c', 'catapult_revision': '4274fe29dac91b7713244daebb6f1d2364d97193',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling CrossBench
# and whatever else without interference from each other.
'crossbench_revision': '27639d495e1cec411073bc82ba1fe368ce0ca89a',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer # the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libfuzzer_revision': 'debe7d2d1982e540fbd6bd78604bf001753f9e74', 'libfuzzer_revision': 'debe7d2d1982e540fbd6bd78604bf001753f9e74',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling centipede
# and whatever else without interference from each other.
'centipede_revision': '63ed43d2bfa2c04e291e81d643a5581c9be90c53',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend # the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other. # and whatever else without interference from each other.
'devtools_frontend_revision': 'f0bf0ece4aae3192fa2f0f2859f668cb343791be', 'devtools_frontend_revision': '2436ae2c5444ba8008a9f092301209a87aef0483',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator # the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -411,6 +414,10 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version # the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other. # and whatever else without interference from each other.
'android_sdk_platforms_preview_version': 'YWMYkzyxGBgVsty0GhXL1oxbY0pGXQIgFc0Rh7ZMRPYC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC', 'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version # the commit queue can handle CLs rolling android_sdk_sources_version
@ -419,11 +426,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'dawn_revision': 'e5193f1ffd232ebf7adfd403114edde08d162663', 'dawn_revision': 'de24841411c4cfb13662238327f2e456c82d26f6',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'quiche_revision': '566b33c9fa5b1723db04be3d40dcaf102344c323', 'quiche_revision': '40c87d454d762f3daaeca334cd2dc962f0476b13',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit # the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -443,7 +450,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby # the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other. # and whatever else without interference from each other.
'nearby_revision': '26973fada5175060db140d7e1157cce6b604dc6a', 'nearby_revision': 'd260feced56cfdea53f941883c250d635ed6064d',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage # the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -455,19 +462,19 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'cros_components_revision': '5e449ecf7311cba83cdcfc1b2ae449f40d22f29e', 'cros_components_revision': 'fb2448fc618b4e3634c8c4097b4a84fcfa34c540',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'resultdb_version': 'git_revision:ac21cf9883af7d1bf6d60e8a7448918eb3b6aa18', 'resultdb_version': 'git_revision:ebc74d10fa0d64057daa6f128e89f3672eeeec95',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libcxxabi_revision': 'd520d582aa710cc0a4635620c02c5dbc187deb4f', 'libcxxabi_revision': '87d8fe050bedb143f232b9ff99a0a46897640e5d',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libunwind_revision': 'e95b94b74d26f8387d4fb03a687a2fab0ed8e91c', 'libunwind_revision': 'c5e861c7b48ee121d3719b7b5c14175c47ec5cc9',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -475,18 +482,18 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'highway_revision': '8ae5b88670fb918f815b717c7c13d38a9b0eb4bb', 'highway_revision': '58746ca5b9f9444a2a3549704602ecc6239f8f41',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg # the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other. # and whatever else without interference from each other.
'ffmpeg_revision': 'a249b21db6516234e5456716ae074fbb00176b3f', 'ffmpeg_revision': 'ee0c52d52036ecadfd38affec86c04937480bedb',
# If you change this, also update the libc++ revision in # If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni. # //buildtools/deps_revisions.gni.
'libcxx_revision': '1127c78cf90cf253be614a1e1d3645da57edbeb4', 'libcxx_revision': 'e136ec5032a5e5d97e988ce66e8c269a80ff54c4',
# GN CIPD package version. # GN CIPD package version.
'gn_version': 'git_revision:5e19d2fb166fbd4f6f32147fbb2f497091a54ad8', 'gn_version': 'git_revision:b25a2f8c2d33f02082f0f258350f5e22c0973108',
# ninja CIPD package version. # ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@ -579,19 +586,6 @@ deps = {
'condition': 'host_os == "linux"', 'condition': 'host_os == "linux"',
}, },
# Rust sources are used to build the Rust standard library, and on toolchain
# build bots, to build the Rust toolchain.
'src/third_party/rust_src/src': {
'packages': [
{
'package': 'chromium/third_party/rust_src',
'version': Var('rust_toolchain_version'),
},
],
'dep_type': 'cipd',
'condition': 'checkout_rust_toolchain_deps or use_rust',
},
# We don't know target_cpu at deps time. At least until there's a universal # We don't know target_cpu at deps time. At least until there's a universal
# binary of httpd-php, pull both intel and arm versions in DEPS and then pick # binary of httpd-php, pull both intel and arm versions in DEPS and then pick
# the right one at runtime. # the right one at runtime.
@ -631,7 +625,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/android_webview/tools/cts_archive', 'package': 'chromium/android_webview/tools/cts_archive',
'version': 'C4m9-gZKvvtS0-KQ9cMRi6wEfZH_TeWSiXEgsgtGtsMC', 'version': 'APYMYksv9eNAkU6osFvNXN38pMO1Q1kScjeecePr7NgC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -654,7 +648,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_mac_universal', 'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': '1c74wqdugvZKRLfu6oY7wkYQ_VpIAObl_7TDwLQw8w4C', 'version': 'u1XGTm7703jO-Ax33P8j-x_KijOeHd36aBA6SO8V3a8C',
}, },
], ],
} }
@ -665,7 +659,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_mac_universal_prod', 'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
'version': 'aFiR8jLJBXsy6aYQhQp8cd9yBEmqa_cJZwx0ltJbKT4C', 'version': 'qDhUmRj82uhWqE2eVqt12e1eJKWKgRpRjgQrBSrdyP4C',
}, },
], ],
}, },
@ -676,7 +670,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_win_x86', 'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'G5yIDMjjCL2TyjU-EmLubZkkb1sLM0XdZ5zB1XmviQkC', 'version': 'RmZn_R0BOPSbruD15DEq9pfu5IhhtjoJX6z-ufrWnD4C',
}, },
], ],
}, },
@ -687,7 +681,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_win_x86_64', 'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'LHw1kjfI3H94qB22t7YsgnMQyXBBgxCgtub5F-GxSCEC', 'version': 'AAes70A2b8-CLhU1h9Sh1x2K-N3WjG7j2Tlp6VOgmnQC',
}, },
], ],
}, },
@ -699,7 +693,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_mac_amd64', 'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': 'Rd4fkayO0xqiXgM-WjFwSTh1YECDXF5ZfcSLW_odlz0C', 'version': '5ui7_fqpvI7a8omWqM8iyD0PZFPJpYKoMHkAb6xA_TkC',
}, },
], ],
}, },
@ -711,7 +705,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_mac_arm64', 'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'UoyUoEkFS37BkNrD1mNVfqdDlPujDLGwaGBdWz7bPakC', 'version': '0KnizXQ2_n_V3aEHR240LqMKw7b0-Pm9VBUmVuQh0cAC',
}, },
], ],
}, },
@ -722,7 +716,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_win_x86', 'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'Sl2g34_swdY9lIDQA6pTzPSTM5tec284DtwYekj9BIwC', 'version': 'g_24x4tVzQIoveectPGIXIGc2ptYDTnOodXieF_OG_4C',
}, },
], ],
}, },
@ -733,18 +727,18 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_win_x86_64', 'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'vnZtwC2H42TkFgDMIYizwAUGFXTMOm00-yjnB6gnXigC', 'version': 's6U9lpJZsILIKkP5bCc_QqvJjPW2MwMWg5IoCBt_YEYC',
}, },
], ],
}, },
'src/chrome/test/data/autofill/captured_sites': { 'src/chrome/test/data/autofill/captured_sites/artifacts': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + '58a7920c173397b57d8d7be95cb93c2b43d02e26', 'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + 'a38d810c87a18582e986b94650d9cfa4b67be12c',
'condition': 'checkout_chromium_autofill_test_dependencies', 'condition': 'checkout_chromium_autofill_test_dependencies',
}, },
'src/chrome/test/data/password/captured_sites': { 'src/chrome/test/data/password/captured_sites/artifacts': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '93dfc8b7199a285a2ed832e607b0e68c5544273a', 'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '04b3ea663adf745c52831650e2426b54bc94e65d',
'condition': 'checkout_chromium_password_manager_test_dependencies', 'condition': 'checkout_chromium_password_manager_test_dependencies',
}, },
@ -770,21 +764,21 @@ deps = {
'src/clank': { 'src/clank': {
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' + 'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'7d4e93f3d1693f1dfe471527e93a8e729ce149a3', 'd83811f32343245218e742319cec89aaefb94657',
'condition': 'checkout_android and checkout_src_internal', 'condition': 'checkout_android and checkout_src_internal',
}, },
'src/docs/website': { 'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '7e351332addd1fca691bb524c976a56f17e3eb95', 'url': Var('chromium_git') + '/website.git' + '@' + '182a630499559e1403237d14e2bc6302d384ed39',
}, },
'src/ios/third_party/earl_grey2/src': { 'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '99ba3b6ed7b8489899f06a0d602e84fc657e8338', 'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '8ac47627cb9ba09bf4bc3489b7aff5d77cd6eb88',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
'src/ios/third_party/edo/src': { 'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'd4d6f7da76f34b87b7b953176ef9e08eda83afb1', 'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '51058a369f943064dc6db4f38dca32263d584ea5',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -799,7 +793,7 @@ deps = {
}, },
'src/ios/third_party/material_components_ios/src': { 'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '2cdac2db582f6067b014aa66a3846588352361a1', 'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'a407ef3ac3220882732e701804613c44704ebf78',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -869,7 +863,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/linux-amd64', 'package': 'chromium/rts/model/linux-amd64',
'version': 'bCwganuATTWjTe2ahjxynn8rnTuSw900jtLOYmi3kV4C', 'version': 'E7vzLhZk6xAJnnioidm0-d5a4cz1OxOr0LJUsCkKKJ0C',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -880,7 +874,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/mac-amd64', 'package': 'chromium/rts/model/mac-amd64',
'version': 'ms15aJhiLzrF61zOZxq2jcESbsF3FFYtCS8R290t8JsC', 'version': '4wYh3p2y6ATe5OeiGmtl-G9thdrKGoX5DHzaP8V_tecC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -891,7 +885,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/windows-amd64', 'package': 'chromium/rts/model/windows-amd64',
'version': 'xILgcx3FOZDE8fCy3EXmw76GOIrdmlVb3aQ5dUSq8x8C', 'version': 'E5Y3kcrVZt1PybXoGxTDRb_KmswZX_5jBphOaHwm2fQC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -959,7 +953,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/androidx', 'package': 'chromium/third_party/androidx',
'version': '8KUWsjmvRQJlzdaro14SgP8nok3-kHY2h00BEjXLJqQC', 'version': 'zEg6hTXAR6Mcqyfh8gHDzD9fmyy1xwz4swj6pkENIYsC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -997,7 +991,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/aapt2', 'package': 'chromium/third_party/android_build_tools/aapt2',
'version': 'cbNG7g8Sinh-lsT8hWsU-RyXqLT_uh4jIb1fjCdhrzIC', 'version': '36NqCian2RIwuM6SFfizdUgKoXyZhy3q6pFfsws0szYC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1019,7 +1013,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/bundletool', 'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'XIPSJgFHEHN1ogOJqWVktlbl8PTfLZdNf_G2h4GcnrYC', 'version': 'TpDdbF-PPgwL0iOVsdLM07L-DUp2DV3hgzCMmPd2_GUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1030,7 +1024,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/lint', 'package': 'chromium/third_party/android_build_tools/lint',
'version': 'EPj7vnLteKz9kMQ6x4ZPaM5E20Bt4I0wTdrxdBpruZMC', 'version': 'MSpv-kFDDSPO0SY0dLdHegUJcJT1Yy8cL9r3vlAZ9vkC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1041,7 +1035,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/manifest_merger', 'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': '5Zw4RYBL86koJro2O-jjcZYxOOdEW-hJDYykae8efQAC', 'version': 'EbRaK62t9grqlZqL-JTd_zwM4t1u9fm1x4c2rLE0cqQC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1075,6 +1069,10 @@ deps = {
'package': 'chromium/third_party/android_sdk/public/platforms/android-33', 'package': 'chromium/third_party/android_sdk/public/platforms/android-33',
'version': Var('android_sdk_platforms_version'), 'version': Var('android_sdk_platforms_version'),
}, },
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-tiramisuprivacysandbox',
'version': Var('android_sdk_platforms_preview_version'),
},
{ {
'package': 'chromium/third_party/android_sdk/public/sources/android-31', 'package': 'chromium/third_party/android_sdk/public/sources/android-31',
'version': Var('android_sdk_sources_version'), 'version': Var('android_sdk_sources_version'),
@ -1122,7 +1120,7 @@ deps = {
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'), Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/breakpad/breakpad': 'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + '79326ebe9446add03e76b4422ff8036e812224d2', Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'abb105db21e962eda5b7d9b7a0ac8dd701e0b987',
'src/third_party/byte_buddy': { 'src/third_party/byte_buddy': {
'packages': [ 'packages': [
@ -1147,7 +1145,7 @@ deps = {
}, },
'src/third_party/cast_core/public/src': 'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + 'f4628fda1b370eb238ae69545024d256ca62d719', Var('chromium_git') + '/cast_core/public' + '@' + 'e42ef68aa05ac0c163805f60b9b19284f3c2dee3',
'src/third_party/catapult': 'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'), Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@ -1176,7 +1174,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple # Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild. # Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': { 'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'd60807b98527df86e516532b8e2a62a1cb44c128', 'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '52efbfdc210624f1895d5994149f53c3a4457f29',
'condition': 'checkout_chromeos', 'condition': 'checkout_chromeos',
}, },
@ -1194,23 +1192,27 @@ deps = {
# For Linux and Chromium OS. # For Linux and Chromium OS.
'src/third_party/cros_system_api': { 'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'e0bfd3d75917adfa22e401805f9f9793cec82559', 'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'ffb88930c81ef7f7026a2433c424d8b3658580d4',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
'src/third_party/crossbench':
Var('chromium_git') + '/crossbench.git' + '@' + Var('crossbench_revision'),
'src/third_party/crubit/src': { 'src/third_party/crubit/src': {
'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'), 'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'),
'condition': 'checkout_crubit', 'condition': 'checkout_crubit',
}, },
'src/third_party/depot_tools': 'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'c023a6302fc665bae743a5833dea350fd3d0749f', Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a6898e71abf374d699ebaa121312e89bad8f100a',
'src/third_party/devtools-frontend/src': 'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'), Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': { 'src/third_party/devtools-frontend-internal': {
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '4c3517346586ea020e5859cf51488e534a90d15e', 'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '2ac32de43d557d678de46fb7cfc850187f3379fd',
'condition': 'checkout_src_internal', 'condition': 'checkout_src_internal',
}, },
@ -1218,7 +1220,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d', Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src': 'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '6156797016164b87b3e360e02d0e4107f7f66fbc', Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '3460f3558e7b469efb8a225894e21929c8c77629',
'src/third_party/emoji-metadata/src': { 'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06', 'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
@ -1246,11 +1248,11 @@ deps = {
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '689da3a7ed50af7448c3f1961d1791c7c1d9c85c', Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '689da3a7ed50af7448c3f1961d1791c7c1d9c85c',
'src/third_party/flatbuffers/src': 'src/third_party/flatbuffers/src':
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'b47ba1d5ffae3bd4d5eaad615e33d7cc5c1e3d4a', Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'a56f9ec50e908362e20254fcef28e62a2f148d91',
# Used for embedded builds. CrOS & Linux use the system version. # Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': { 'src/third_party/fontconfig/src': {
'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '452be8125f0e2a18a7dfef469e05d19374d36307', 'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '06929a556fdc39c8fe12965b69070c8df520a33e',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
@ -1370,7 +1372,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e', Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu': 'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '4e100720a20a471ca5ceff3b15a87596b694ada4', Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'faf22e66ceafad90f5cafbd6707055c24646adcc',
'src/third_party/icu4j': { 'src/third_party/icu4j': {
'packages': [ 'packages': [
@ -1433,11 +1435,22 @@ deps = {
'condition': 'checkout_android', 'condition': 'checkout_android',
}, },
'src/third_party/kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/kotlin_stdlib',
'version': 'Mg7371mEUwDQH4_z29HdWqYWVlXN6t2dXX0kIutg_SwC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/kotlinc/current': { 'src/third_party/kotlinc/current': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/kotlinc', 'package': 'chromium/third_party/kotlinc',
'version': 'F-v9Yy4tNQtjGB7TtAWc2J-3qhx9Q6ixZJyuGixVH08C', 'version': 'bCZedwoM-hb1pP1QKzA3P5aR4zjZltqLj4JQpmQsHuUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1450,11 +1463,14 @@ deps = {
'src/third_party/libFuzzer/src': 'src/third_party/libFuzzer/src':
Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'), Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
'src/third_party/centipede/src':
Var('chromium_git') + '/external/github.com/google/centipede.git' + '@' + Var('centipede_revision'),
'src/third_party/libaddressinput/src': 'src/third_party/libaddressinput/src':
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd', Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd',
'src/third_party/libaom/source/libaom': 'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '74d61ae86f20bc9fb707347bfe618425024f3865', Var('aomedia_git') + '/aom.git' + '@' + '70b12695e1967d9589dd15b345a039e575e8f429',
'src/third_party/libavif/src': 'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'), Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@ -1487,7 +1503,7 @@ deps = {
}, },
'src/third_party/libjpeg_turbo': 'src/third_party/libjpeg_turbo':
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'ed683925e4897a84b3bffc5c1414c85b97a129a3', Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '0b6e6a152242c2fa30ffb29633d1b7d7b229b46b',
'src/third_party/liblouis/src': { 'src/third_party/liblouis/src': {
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376', 'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
@ -1515,16 +1531,16 @@ deps = {
}, },
'src/third_party/libvpx/source/libvpx': 'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'b7c22b3a9584d7d9c0a7b9b37a52bc595113b398', Var('chromium_git') + '/webm/libvpx.git' + '@' + 'db69ce6aea278bee88668fd9cc2af2e544516fdb',
'src/third_party/libwebm/source': 'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da', Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
'src/third_party/libwebp/src': 'src/third_party/libwebp/src':
Var('chromium_git') + '/webm/libwebp.git' + '@' + '603e8d7adb0ccc35237419c2938194623b60e9be', Var('chromium_git') + '/webm/libwebp.git' + '@' + 'fd7b5d48464475408d32d2611bdb6947d4246b97',
'src/third_party/libyuv': 'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '6e4b0acb4b3d5858c77a044aad46132998ac4a76', Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '2bdc210be9eb11ded16bf3ef1f6cadb0d4dcb0c2',
'src/third_party/lighttpd': { 'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'), 'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@ -1651,7 +1667,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9', Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9',
'src/third_party/openscreen/src': 'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + 'eca304d29cee3f9d045fd0dd36f147a91a367c75', Var('chromium_git') + '/openscreen' + '@' + '5d694418bc76f66463f06ce141c375062b0ba3b0',
'src/third_party/openxr/src': { 'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245', 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@ -1668,7 +1684,7 @@ deps = {
}, },
'src/third_party/perfetto': 'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + '3aa2acd9af48d097ad94cf778c2228031e6c4dfa', Var('android_git') + '/platform/external/perfetto.git' + '@' + '4bda78645d1d23a98473b793bc532a3ebff6c7f9',
'src/third_party/perl': { 'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3', 'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@ -1702,13 +1718,13 @@ deps = {
}, },
'src/third_party/re2/src': 'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '8afcf7fcc481692197e33612446d69e8f5777c54', Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '3a8436ac436124a57a4e22d5c8713a2d42b381d7',
'src/third_party/r8': { 'src/third_party/r8': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/r8', 'package': 'chromium/third_party/r8',
'version': 'lhnuNLpWpWBVM6efX0iIg5i9ztfW8VKpMvkyfWCxfr0C', 'version': 'HGbnG0_a1HHQtwgKBlFRLuC0-AVyYhHpcTol560MvlUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1722,7 +1738,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/r8', 'package': 'chromium/third_party/r8',
'version': 'qGtBu6TtxyR5XNy4cmsslb7c946YtkZF5_QCjVP-wc8C', 'version': 'PwglNZFRNPkBBXdnY9NfrZFk2ULWDTRxhV9rl2kvkpUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1746,7 +1762,7 @@ deps = {
}, },
'src/third_party/ruy/src': 'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '3168a5c8f4c447fd8cea94078121ee2e2cd87df0', Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '21a85fef159f9942f636a43b14c64b481c2a05b2',
'src/third_party/skia': 'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'), Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@ -1758,7 +1774,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0', Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src': 'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '469aae8118e18b7354607f8ef09780cf8f3e54aa', Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '88742a54683bcdec9a0d0c14462621da8b6f841e',
'src/third_party/sqlite4java': { 'src/third_party/sqlite4java': {
'packages': [ 'packages': [
@ -1800,20 +1816,20 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f', Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src': 'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ef70dc999eee784e3f505e89c798f8b9cc894e52', Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '60ec371de65a63d588bcfce7a99482847ad1312e',
'src/third_party/turbine': { 'src/third_party/turbine': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/turbine', 'package': 'chromium/third_party/turbine',
'version': 't0TeGgk2CZr3B0HtEqBb60BSRwVPRJm9066izhJwzz0C', 'version': 'YQC-btuan_DTe9V9dv_e1LxgYSWeOoDfrd-VSqzIvHkC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@243deb3abd84f442957dc5394745d25482ff791b', 'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3c1556cc73226c2895c1de9a925dc5fe623c8752',
'src/third_party/vulkan_memory_allocator': 'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907', Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@ -1850,10 +1866,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2', Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src': 'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'a7e54e7b964d08901cba6418ca00ffec501bc867', Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '6c8361e98f1daba65902f5e2fc1297893ac14b67',
'src/third_party/webrtc': 'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'b6b9b1fc761c039195faa033cb8fdde4ed8ba0a9', Var('webrtc_git') + '/src.git' + '@' + 'd75b9e9ff07ee42841b4e416629c9fbd4b058905',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use # Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file. # Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@ -1871,7 +1887,7 @@ deps = {
}, },
'src/third_party/xnnpack/src': 'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a50369c0fdd15f0f35b1a91c964644327a88d480', Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '659147817805d17c7be2d60bd7bbca7e780f9c82',
'src/tools/page_cycler/acid3': 'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521', Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@ -1880,7 +1896,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/linux-amd64', 'package': 'skia/tools/goldctl/linux-amd64',
'version': 'PZOpm-VdLUuaVE8seo910YRCnqv7Y2BkPcrmUs13RMAC', 'version': '-G9gUusEGDPsbf_GULdyJo9DYyeNBuqD8gHfdxCvIbYC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1890,7 +1906,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/windows-amd64', 'package': 'skia/tools/goldctl/windows-amd64',
'version': 'qp3u_bn43vFlG3HHG61Co9eOeo52m6SWIw099mHqt9EC', 'version': 'BZ0EL-KSkwCzJciJf9MbwmZAJPRhlKOp0LEYiTV6lWIC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1901,7 +1917,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/mac-amd64', 'package': 'skia/tools/goldctl/mac-amd64',
'version': 'aZ8KYJUPYrRq4f7-Td3nt0et_16S06A0vovOn2c85tIC', 'version': '0GVvuvDBNt6KJ7UzxBRUW5ShTWCliifyzaCkudNzmrkC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1912,7 +1928,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/mac-arm64', 'package': 'skia/tools/goldctl/mac-arm64',
'version': 'JtcfJFsvsUuaaIajqvwETn8j5hxOSC_YLDszV96Ukn8C', 'version': '8vKG1ZGA0f7asv5AHh_7yBxVD2h-I-yR2oY4TOjwo6kC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1923,7 +1939,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'), Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': { 'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@f1d52b8c1ec0769ac006917d1fe42e99a4dba6c3', 'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@67c8cac0a84ad86b64ecf3f4af23a928fb605313',
'condition': 'checkout_src_internal', 'condition': 'checkout_src_internal',
}, },
@ -1942,7 +1958,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/eche_app/app', 'package': 'chromeos_internal/apps/eche_app/app',
'version': 'Y9Vb3-WAI0IRjTRTVDtPP86MNNpZItvfey3JuYZXXeYC', 'version': 'WyNqAPOj-HR5fZBbkHIXf7YeyCvf0GpXuhdv6EqzNJsC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1953,7 +1969,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/help_app/app', 'package': 'chromeos_internal/apps/help_app/app',
'version': 'J19Uq07iO__IsduQFotOfHNdiRWoyIQc4UgK1HpMFU8C', 'version': 'hF_ZkOgJWb6Tl-9h6WAmpF4VcZggBH4rjoP_hBr2ddUC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1964,7 +1980,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/media_app/app', 'package': 'chromeos_internal/apps/media_app/app',
'version': 'CHpgn1-7IChFiK96I1-giMbXe-Cl9XQiwH3aHwCGzYwC', 'version': 'EXosTZG9iiyjnqmWKjS04Tf9dvSUjbHqqhGv1SQW0ycC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1975,7 +1991,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/projector_app/app', 'package': 'chromeos_internal/apps/projector_app/app',
'version': 'ufJ9DwqTBE76l81FUQQ2JOIG1ely5QRDFuwz3ccJIRIC', 'version': 'zmInwk2DIsJlzZbF9Fw29hmN6rQTpzqIgzzMAgwl2PkC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1994,7 +2010,7 @@ deps = {
}, },
'src/third_party/android_prebuilts/build_tools': { 'src/third_party/android_prebuilts/build_tools': {
'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '5794e96eb8bae47bb48feee915d99583573b3887', 'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '673c20b524a83b662d8c1057fd3eec8fd0f93f9d',
'condition': 'checkout_android_prebuilts_build_tools', 'condition': 'checkout_android_prebuilts_build_tools',
}, },
@ -2569,7 +2585,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@18.1.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2723,7 +2739,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@18.0.2.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2763,11 +2779,22 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/com_google_android_play_core': { 'src/third_party/android_deps/libs/com_google_android_play_core_common': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core', 'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core_common',
'version': 'version:2@1.10.0.cr1', 'version': 'version:2@2.0.2.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_play_feature_delivery': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_feature_delivery',
'version': 'version:2@2.0.1.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3328,7 +3355,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy', 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy',
'version': 'version:2@1.12.13.cr1', 'version': 'version:2@1.12.22.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3339,7 +3366,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent', 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent',
'version': 'version:2@1.12.13.cr1', 'version': 'version:2@1.12.22.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3478,39 +3505,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/org_jetbrains_annotations': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_annotations',
'version': 'version:2@13.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': { 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': {
'packages': [ 'packages': [
{ {
@ -3581,7 +3575,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core', 'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core',
'version': 'version:2@4.7.0.cr1', 'version': 'version:2@5.1.1.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3592,7 +3586,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_objenesis_objenesis', 'package': 'chromium/third_party/android_deps/libs/org_objenesis_objenesis',
'version': 'version:2@3.2.cr1', 'version': 'version:2@3.3.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3876,16 +3870,26 @@ include_rules = [
'+third_party/icu/source/i18n/unicode', '+third_party/icu/source/i18n/unicode',
'+url', '+url',
# Abseil features are allowlisted explicitly. See # Abseil is allowed by default, but some features are banned. See
# //styleguide/c++/c++-features.md. # //styleguide/c++/c++-features.md.
'-absl', '+third_party/abseil-cpp',
'-third_party/abseil-cpp', '-third_party/abseil-cpp/absl/algorithm/container.h',
'+third_party/abseil-cpp/absl/base/attributes.h', '-third_party/abseil-cpp/absl/container',
'+third_party/abseil-cpp/absl/cleanup/cleanup.h', '-third_party/abseil-cpp/absl/crc',
'+third_party/abseil-cpp/absl/numeric/int128.h', '-third_party/abseil-cpp/absl/flags',
'+third_party/abseil-cpp/absl/types/optional.h', '-third_party/abseil-cpp/absl/functional/any_invocable.h',
'+third_party/abseil-cpp/absl/types/variant.h', '-third_party/abseil-cpp/absl/functional/bind_front.h',
'+third_party/abseil-cpp/absl/utility/utility.h', '-third_party/abseil-cpp/absl/functional/function_ref.h',
'-third_party/abseil-cpp/absl/hash',
'-third_party/abseil-cpp/absl/log',
'-third_party/abseil-cpp/absl/random',
'-third_party/abseil-cpp/absl/status/statusor.h',
'-third_party/abseil-cpp/absl/strings',
'+third_party/abseil-cpp/absl/strings/cord.h',
'-third_party/abseil-cpp/absl/synchronization',
'-third_party/abseil-cpp/absl/time',
'-third_party/abseil-cpp/absl/types/any.h',
'-third_party/abseil-cpp/absl/types/span.h',
] ]
@ -4353,30 +4357,6 @@ hooks = [
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1', '-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1',
], ],
}, },
{
'name': 'msan_chained_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1',
],
},
{
'name': 'msan_no_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1',
],
},
{ {
'name': 'wasm_fuzzer', 'name': 'wasm_fuzzer',
'pattern': '.', 'pattern': '.',
@ -4572,7 +4552,7 @@ hooks = [
{ {
'name': 'Fetch Android AFDO profile', 'name': 'Fetch Android AFDO profile',
'pattern': '.', 'pattern': '.',
'condition': 'checkout_android or checkout_linux', 'condition': 'checkout_android',
'action': [ 'python3', 'action': [ 'python3',
'src/tools/download_optimization_profile.py', 'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/newest.txt', '--newest_state=src/chrome/android/profiles/newest.txt',
@ -4581,6 +4561,18 @@ hooks = [
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm', '--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
], ],
}, },
{
'name': 'Fetch Android Arm AFDO profile',
'pattern': '.',
'condition': 'checkout_android',
'action': [ 'python3',
'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/arm.newest.txt',
'--local_state=src/chrome/android/profiles/arm.local.txt',
'--output_name=src/chrome/android/profiles/arm.afdo.prof',
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
],
},
{ {
'name': 'gvr_static_shim_android', 'name': 'gvr_static_shim_android',
'pattern': '\\.sha1', 'pattern': '\\.sha1',

File diff suppressed because it is too large Load Diff

View File

@ -207,7 +207,7 @@ AllocatorDispatch g_allocator_dispatch = {&AllocFn,
} // namespace base::allocator::dispatcher::allocator_shim_details } // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL) #if BUILDFLAG(USE_PARTITION_ALLOC)
namespace base::allocator::dispatcher::partition_allocator_details { namespace base::allocator::dispatcher::partition_allocator_details {
namespace { namespace {
@ -222,7 +222,7 @@ void PartitionFreeHook(void* address) {
} // namespace } // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details } // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL) #endif // BUILDFLAG(USE_PARTITION_ALLOC)
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
@ -236,11 +236,11 @@ void InstallStandardAllocatorHooks() {
// happen for tests. // happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL) #if BUILDFLAG(USE_PARTITION_ALLOC)
partition_alloc::PartitionAllocHooks::SetObserverHooks( partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook, &partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook); &partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL) #endif // BUILDFLAG(USE_PARTITION_ALLOC)
} }
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher

View File

@ -6,6 +6,8 @@
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
@ -15,15 +17,25 @@
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
pthread_key_t ReentryGuard::entered_key_ = 0; // pthread_key_t has different signedness on Mac and Android. Store the null
// value in a strongly-typed constant to avoid "comparison of integers of
// different signs" warnings when comparing with 0.
constexpr pthread_key_t kNullKey = 0;
pthread_key_t ReentryGuard::entered_key_ = kNullKey;
void ReentryGuard::InitTLSSlot() { void ReentryGuard::InitTLSSlot() {
if (entered_key_ == 0) { if (entered_key_ == kNullKey) {
int error = pthread_key_create(&entered_key_, nullptr); int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error); CHECK(!error);
// Touch the TLS slot immediately to force any allocations.
// TODO(https://crbug.com/1411454): Use this technique to avoid allocations
// in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
// ReentryGuard redundant.
pthread_setspecific(entered_key_, nullptr);
} }
DCHECK(entered_key_ != 0); DCHECK_NE(entered_key_, kNullKey);
} }
#else #else
@ -31,4 +43,19 @@ void ReentryGuard::InitTLSSlot() {
void ReentryGuard::InitTLSSlot() {} void ReentryGuard::InitTLSSlot() {}
#endif #endif
void ReentryGuard::RecordTLSSlotToCrashKey() {
// Record the key in crash dumps to detect when it's higher than 32
// (PTHREAD_KEY_2NDLEVEL_SIZE).
// TODO(crbug.com/1411454): Remove this after diagnosing reentry crashes.
static auto* const crash_key = base::debug::AllocateCrashKeyString(
"reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
base::debug::SetCrashKeyString(crash_key, base::NumberToString(entered_key_));
#else
base::debug::SetCrashKeyString(crash_key, "unused");
#endif
}
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher

View File

@ -23,8 +23,10 @@ namespace base::allocator::dispatcher {
// twice. The scoped guard allows us to detect that. // twice. The scoped guard allows us to detect that.
// //
// Besides that the implementations of thread_local on macOS and Android // Besides that the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local variables. // seem to allocate memory lazily on the first access to thread_local variables
// Make use of pthread TLS instead of C++ thread_local there. // (and on Android at least thread_local is implemented on top of pthread so is
// strictly worse for performance). Make use of pthread TLS instead of C++
// thread_local there.
struct BASE_EXPORT ReentryGuard { struct BASE_EXPORT ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) { ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true)); pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
@ -37,12 +39,19 @@ struct BASE_EXPORT ReentryGuard {
explicit operator bool() const noexcept { return allowed_; } explicit operator bool() const noexcept { return allowed_; }
// This function must be called in very early of the process start-up in // This function must be called before installing any allocator hooks because
// order to acquire a low TLS slot number because glibc TLS implementation // some TLS implementations may allocate (eg. glibc will require a malloc call
// will require a malloc call to allocate storage for a higher slot number // to allocate storage for a higher slot number (>= PTHREAD_KEY_2NDLEVEL_SIZE
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot. // == 32). This touches the thread-local storage so that any malloc happens
// before installing the hooks.
static void InitTLSSlot(); static void InitTLSSlot();
// InitTLSSlot() is called before crash keys are available. At some point
// after SetCrashKeyImplementation() is called, this function should be
// called to record `entered_key_` to a crash key for debugging. This may
// allocate so it must not be called from inside an allocator hook.
static void RecordTLSSlotToCrashKey();
private: private:
static pthread_key_t entered_key_; static pthread_key_t entered_key_;
const bool allowed_; const bool allowed_;
@ -56,6 +65,7 @@ struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; } constexpr explicit operator bool() const noexcept { return true; }
static void InitTLSSlot(); static void InitTLSSlot();
static void RecordTLSSlotToCrashKey();
}; };
#endif #endif

View File

@ -32,10 +32,16 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
BASE_FEATURE(kPartitionAllocDanglingPtr, BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr", "PartitionAllocDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT); #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif
);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = { constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"}, {DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"}, {DanglingPtrMode::kLogOnly, "log_only"},
}; };
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
@ -43,14 +49,24 @@ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
DanglingPtrMode::kCrash, DanglingPtrMode::kCrash,
&kDanglingPtrModeOption, &kDanglingPtrModeOption,
}; };
constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
&kDanglingPtrTypeOption,
};
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't // If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly. // disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan, BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan", "PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition. // If enabled, PCScan is turned on only for the browser's malloc partition.
@ -88,7 +104,8 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
BASE_FEATURE(kPartitionAllocBackupRefPtr, BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX)) (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
@ -183,11 +200,11 @@ BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
// In addition to heap, scan also the stack of the current mutator. // In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning, BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning", "PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED) #if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED) #endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
); );
BASE_FEATURE(kPartitionAllocDCScan, BASE_FEATURE(kPartitionAllocDCScan,

View File

@ -6,7 +6,6 @@
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/feature_list.h" #include "base/feature_list.h"
@ -25,10 +24,6 @@ extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam; kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md // See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode { enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr. // Crash immediately after detecting a dangling raw_ptr.
@ -36,19 +31,33 @@ enum class DanglingPtrMode {
// Log the signature of every occurrences without crashing. It is used by // Log the signature of every occurrences without crashing. It is used by
// bots. // bots.
// Format "[DanglingSignature]\t<1>\t<2>" // Format "[DanglingSignature]\t<1>\t<2>\t<3>\t<4>"
// 1. The function who freed the memory while it was still referenced. // 1. The function which freed the memory while it was still referenced.
// 2. The function who released the raw_ptr reference. // 2. The task in which the memory was freed.
kLogSignature, // 3. The function which released the raw_ptr reference.
// 4. The task in which the raw_ptr was released.
kLogOnly,
// Note: This will be extended with a single shot DumpWithoutCrashing. // Note: This will be extended with a single shot DumpWithoutCrashing.
}; };
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode> extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam; kDanglingPtrModeParam;
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
#if PA_CONFIG(ALLOW_PCSCAN) // Detect when freeing memory and releasing the dangling raw_ptr happens in
// a different task. Those are more likely to cause use after free.
kCrossTask,
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);

View File

@ -48,14 +48,14 @@
#include "build/build_config.h" #include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h" #include "third_party/abseil-cpp/absl/types/optional.h"
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h" #include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h" #include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h" #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#include "base/memory/nonscannable_memory.h" #include "base/memory/nonscannable_memory.h"
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h" #include "base/system/sys_info.h"
@ -74,13 +74,13 @@ namespace {
namespace switches { namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer"; [[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote"; constexpr char kZygoteProcess[] = "zygote";
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
constexpr char kGpuProcess[] = "gpu-process"; constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility"; constexpr char kUtilityProcess[] = "utility";
#endif #endif
} // namespace switches } // namespace switches
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING) #if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString( constexpr const char* ScannerIdToTracingString(
@ -181,11 +181,11 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
static constexpr char kTraceCategory[] = "partition_alloc"; static constexpr char kTraceCategory[] = "partition_alloc";
}; };
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
} // namespace } // namespace
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
void RegisterPCScanStatsReporter() { void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter; static StatsReporterImpl s_reporter;
static bool registered = false; static bool registered = false;
@ -195,7 +195,7 @@ void RegisterPCScanStatsReporter() {
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter); partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true; registered = true;
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
namespace { namespace {
@ -302,7 +302,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
} }
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
[[maybe_unused]] bool pcscan_enabled = [[maybe_unused]] bool pcscan_enabled =
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly); FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else #else
false; false;
@ -378,7 +378,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
// fully controlled by Finch and thus have identical population sizes. // fully controlled by Finch and thus have identical population sizes.
std::string pcscan_group_name = "Unavailable"; std::string pcscan_group_name = "Unavailable";
std::string pcscan_group_name_fallback = "Unavailable"; std::string pcscan_group_name_fallback = "Unavailable";
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
if (brp_truly_enabled) { if (brp_truly_enabled) {
// If BRP protection is enabled, just ignore the population. Check // If BRP protection is enabled, just ignore the population. Check
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes // brp_truly_enabled, not brp_finch_enabled, because there are certain modes
@ -395,7 +395,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
} else { } else {
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled"); pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
trials.emplace("PCScan_Effective", pcscan_group_name); trials.emplace("PCScan_Effective", pcscan_group_name);
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback); trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@ -415,11 +415,13 @@ namespace {
internal::PartitionLock g_stack_trace_buffer_lock; internal::PartitionLock g_stack_trace_buffer_lock;
struct StackTraceWithID { struct DanglingPointerFreeInfo {
debug::StackTrace stack_trace; debug::StackTrace stack_trace;
debug::TaskTrace task_trace;
uintptr_t id = 0; uintptr_t id = 0;
}; };
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>; using DanglingRawPtrBuffer =
std::array<absl::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock); DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
void DanglingRawPtrDetected(uintptr_t id) { void DanglingRawPtrDetected(uintptr_t id) {
@ -428,14 +430,14 @@ void DanglingRawPtrDetected(uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock); internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) { for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
PA_DCHECK(!entry || entry->id != id); PA_DCHECK(!entry || entry->id != id);
} }
#endif // DCHECK_IS_ON() #endif // DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) { for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
if (!entry) { if (!entry) {
entry = {debug::StackTrace(), id}; entry = {debug::StackTrace(), debug::TaskTrace(), id};
return; return;
} }
} }
@ -444,15 +446,16 @@ void DanglingRawPtrDetected(uintptr_t id) {
// enough. // enough.
} }
// From the StackTrace recorded in |DanglingRawPtrDetected|, extract the one // From the traces recorded in |DanglingRawPtrDetected|, extract the one
// whose id match |id|. Return nullopt if not found. // whose id match |id|. Return nullopt if not found.
absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) { absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock); internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) { for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
if (entry && entry->id == id) { if (entry && entry->id == id) {
debug::StackTrace stack_trace = std::move(entry->stack_trace); absl::optional<DanglingPointerFreeInfo> result(entry);
entry = absl::nullopt; entry = absl::nullopt;
return stack_trace; return result;
} }
} }
return absl::nullopt; return absl::nullopt;
@ -463,14 +466,31 @@ absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
// are all the dangling raw_ptr occurrences in a table. // are all the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) { std::string ExtractDanglingPtrSignature(std::string stacktrace) {
std::vector<StringPiece> lines = SplitStringPiece( std::vector<StringPiece> lines = SplitStringPiece(
stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY); stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
// We are looking for the callers of the function releasing the raw_ptr and // We are looking for the callers of the function releasing the raw_ptr and
// freeing memory: // freeing memory:
const StringPiece callees[] = { const StringPiece callees[] = {
// Common signatures
"internal::PartitionFree",
"base::(anonymous namespace)::FreeFn",
// Linux signatures
"internal::RawPtrBackupRefImpl<>::ReleaseInternal()", "internal::RawPtrBackupRefImpl<>::ReleaseInternal()",
"internal::PartitionFree()", "base::RefCountedThreadSafe<>::Release()",
"base::(anonymous namespace)::FreeFn()",
// Windows signatures
"internal::RawPtrBackupRefImpl<0>::ReleaseInternal",
"_free_base",
// Windows stack traces are prefixed with "Backtrace:"
"Backtrace:",
// Mac signatures
"internal::RawPtrBackupRefImpl<false>::ReleaseInternal",
// Task traces are prefixed with "Task trace:" in
// |TaskTrace::OutputToStream|
"Task trace:",
}; };
size_t caller_index = 0; size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) { for (size_t i = 0; i < lines.size(); ++i) {
@ -481,78 +501,142 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
} }
} }
if (caller_index >= lines.size()) { if (caller_index >= lines.size()) {
return "undefined"; return "no_callee_match";
} }
StringPiece caller = lines[caller_index]; StringPiece caller = lines[caller_index];
// |callers| follows the following format: if (caller.empty()) {
return "invalid_format";
}
// On Posix platforms |callers| follows the following format:
// //
// #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend() // #<index> <address> <symbol>
// -- -------------- ----------------------------------------------- //
// Depth Address Function // See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] == '#') {
const size_t address_start = caller.find(' ');
const size_t function_start = caller.find(' ', address_start + 1);
size_t address_start = caller.find(' '); if (address_start == caller.npos || function_start == caller.npos) {
size_t function_start = caller.find(' ', address_start + 1); return "invalid_format";
}
if (address_start == caller.npos || function_start == caller.npos) { return std::string(caller.substr(function_start + 1));
return "undefined";
} }
return std::string(caller.substr(function_start + 1)); // On Windows platforms |callers| follows the following format:
} //
// \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
void DanglingRawPtrReleasedLogSignature(uintptr_t id) { //
// This is called from raw_ptr<>'s release operation. Making allocations is // See https://crsrc.org/c/base/debug/stack_trace_win.cc
// allowed. In particular, symbolizing and printing the StackTraces may if (caller[0] == '\t') {
// allocate memory. const size_t symbol_start = 1;
const size_t symbol_end = caller.find(' ');
debug::StackTrace stack_trace_release; if (symbol_end == caller.npos) {
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id); return "invalid_format";
}
if (stack_trace_free) { return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\t%s",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
} else {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\tmissing-stacktrace",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
} }
// On Mac platforms |callers| follows the following format:
//
// <index> <library> 0x<address> <symbol> + <line>
//
// See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] >= '0' && caller[0] <= '9') {
const size_t address_start = caller.find("0x");
const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
const size_t symbol_end = caller.find(' ', symbol_start);
if (symbol_start == caller.npos || symbol_end == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
}
return "invalid_format";
} }
void DanglingRawPtrReleasedCrash(uintptr_t id) { std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
if (task_trace.empty()) {
return "No active task";
}
return ExtractDanglingPtrSignature(task_trace.ToString());
}
std::string ExtractDanglingPtrSignature(
absl::optional<DanglingPointerFreeInfo> free_info,
debug::StackTrace release_stack_trace,
debug::TaskTrace release_task_trace) {
if (free_info) {
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s",
ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
template <features::DanglingPtrMode dangling_pointer_mode,
features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is // This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may // allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory. // allocate memory.
debug::StackTrace stack_trace_release; debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release; debug::TaskTrace task_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id); absl::optional<DanglingPointerFreeInfo> free_info =
TakeDanglingPointerFreeInfo(id);
if constexpr (dangling_pointer_type ==
features::DanglingPtrType::kCrossTask) {
if (!free_info) {
return;
}
if (task_trace_release.ToString() == free_info->task_trace.ToString()) {
return;
}
}
std::string dangling_signature = ExtractDanglingPtrSignature(
free_info, stack_trace_release, task_trace_release);
static const char dangling_ptr_footer[] = static const char dangling_ptr_footer[] =
"\n" "\n"
"\n" "\n"
"Please check for more information on:\n" "Please check for more information on:\n"
"https://chromium.googlesource.com/chromium/src/+/main/docs/" "https://chromium.googlesource.com/chromium/src/+/main/docs/"
"dangling_ptr_guide.md\n"; "dangling_ptr_guide.md\n"
"\n"
if (stack_trace_free) { "Googlers: Please give us your feedback about the dangling pointer\n"
" detector at:\n"
" http://go/dangling-ptr-cq-survey\n";
if (free_info) {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n"
<< dangling_signature << "\n\n"
<< "The memory was freed at:\n" << "The memory was freed at:\n"
<< *stack_trace_free << "\n" << free_info->stack_trace << free_info->task_trace << "\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release << stack_trace_release << task_trace_release
<< dangling_ptr_footer; << dangling_ptr_footer;
} else { } else {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n" << "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release << stack_trace_release << task_trace_release
<< dangling_ptr_footer; << dangling_ptr_footer;
} }
ImmediateCrash();
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
ImmediateCrash();
}
} }
void ClearDanglingRawPtrBuffer() { void ClearDanglingRawPtrBuffer() {
@ -573,16 +657,35 @@ void InstallDanglingRawPtrChecks() {
return; return;
} }
partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
switch (features::kDanglingPtrModeParam.Get()) { switch (features::kDanglingPtrModeParam.Get()) {
case features::DanglingPtrMode::kCrash: case features::DanglingPtrMode::kCrash:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected); switch (features::kDanglingPtrTypeParam.Get()) {
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash); case features::DanglingPtrType::kAll:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kCrossTask>);
break;
}
break; break;
case features::DanglingPtrMode::kLogOnly:
case features::DanglingPtrMode::kLogSignature: switch (features::kDanglingPtrTypeParam.Get()) {
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected); case features::DanglingPtrType::kAll:
partition_alloc::SetDanglingRawPtrReleasedFn( partition_alloc::SetDanglingRawPtrReleasedFn(
DanglingRawPtrReleasedLogSignature); &DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
features::DanglingPtrType::kCrossTask>);
break;
}
break; break;
} }
} }
@ -632,7 +735,7 @@ void InstallUnretainedDanglingRawPtrChecks() {
namespace { namespace {
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
void SetProcessNameForPCScan(const std::string& process_type) { void SetProcessNameForPCScan(const std::string& process_type) {
const char* name = [&process_type] { const char* name = [&process_type] {
if (process_type.empty()) { if (process_type.empty()) {
@ -713,7 +816,7 @@ bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false; return false;
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
} // namespace } // namespace
@ -929,7 +1032,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// If BRP is not enabled, check if any of PCScan flags is enabled. // If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false; [[maybe_unused]] bool scan_enabled = false;
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
if (!enable_brp) { if (!enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded(); scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process. // No specified process type means this is the Browser process.
@ -963,10 +1066,10 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
SetProcessNameForPCScan(process_type); SetProcessNameForPCScan(process_type);
} }
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
// Non-quarantinable partition is dealing with hot V8's zone allocations. // Non-quarantinable partition is dealing with hot V8's zone allocations.
// In case PCScan is enabled in Renderer, enable thread cache on this // In case PCScan is enabled in Renderer, enable thread cache on this
// partition. At the same time, thread cache on the main(malloc) partition // partition. At the same time, thread cache on the main(malloc) partition
@ -976,7 +1079,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
.root() .root()
->EnableThreadCacheIfSupported(); ->EnableThreadCacheIfSupported();
} else } else
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
{ {
allocator_shim::internal::PartitionAllocMalloc::Allocator() allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported(); ->EnableThreadCacheIfSupported();
@ -1058,7 +1161,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) && #endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
if (base::FeatureList::IsEnabled( if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanMUAwareScheduler)) { base::features::kPartitionAllocPCScanMUAwareScheduler)) {
// Assign PCScan a task-based scheduling backend. // Assign PCScan a task-based scheduling backend.
@ -1070,7 +1173,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend( partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
*mu_aware_task_based_backend.get()); *mu_aware_task_based_backend.get());
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::allocator::StartMemoryReclaimer( base::allocator::StartMemoryReclaimer(
@ -1136,4 +1239,11 @@ void PartitionAllocSupport::OnBackgrounded() {
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} }
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
std::string stacktrace) {
return ExtractDanglingPtrSignature(stacktrace);
}
#endif
} // namespace base::allocator } // namespace base::allocator

View File

@ -19,7 +19,7 @@
namespace base::allocator { namespace base::allocator {
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter(); BASE_EXPORT void RegisterPCScanStatsReporter();
#endif #endif
@ -75,6 +75,11 @@ class BASE_EXPORT PartitionAllocSupport {
void OnForegrounded(bool has_main_frame); void OnForegrounded(bool has_main_frame);
void OnBackgrounded(); void OnBackgrounded();
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static std::string ExtractDanglingPtrSignatureForTests(
std::string stacktrace);
#endif
static PartitionAllocSupport* Get() { static PartitionAllocSupport* Get() {
static auto* singleton = new PartitionAllocSupport(); static auto* singleton = new PartitionAllocSupport();
return singleton; return singleton;

View File

@ -289,19 +289,20 @@ component("partition_alloc") {
} }
if (use_starscan) { if (use_starscan) {
if (current_cpu == "x64") { if (current_cpu == "x64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") { } else if (current_cpu == "x86") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
} else if (current_cpu == "arm") { } else if (current_cpu == "arm") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") { } else if (current_cpu == "arm64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
} else { } else {
# To support a trampoline for another arch, please refer to v8/src/heap/base. # To support a trampoline for another arch, please refer to v8/src/heap/base.
assert(!pcscan_stack_supported)
} }
} }
public_deps = [ public_deps = [
@ -397,6 +398,12 @@ source_set("raw_ptr") {
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ] defines = [ "IS_RAW_PTR_IMPL" ]
# When built inside Chromium, although this cannot directly be made a
# component, we expect `//base` to provide the only GN-level access.
if (build_with_chromium) {
visibility = [ "//base" ]
}
} }
buildflag_header("partition_alloc_buildflags") { buildflag_header("partition_alloc_buildflags") {
@ -415,12 +422,15 @@ buildflag_header("partition_alloc_buildflags") {
# defines and partition alloc includes the header file. For chrome, # defines and partition alloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes. # gen/base/allocator/buildflags.h defines and chrome includes.
flags = [ flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"USE_PARTITION_ALLOC=$use_partition_alloc", "USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc", "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support", "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks", "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks", "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment", "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr", "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot", "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
@ -442,9 +452,17 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata", "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_STARSCAN=$use_starscan", "USE_STARSCAN=$use_starscan",
"PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
"ENABLE_PKEYS=$enable_pkeys", "ENABLE_PKEYS=$enable_pkeys",
] ]
if (is_apple) {
# TODO(crbug.com/1414153): once TimeTicks::Now behavior is unified on iOS,
# this should be removed.
flags += [ "PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS=" +
"$partition_alloc_enable_mach_absolute_time_ticks" ]
}
} }
buildflag_header("chromecast_buildflags") { buildflag_header("chromecast_buildflags") {

View File

@ -158,5 +158,9 @@ specific_include_rules = {
"+base", "+base",
"+third_party/abseil-cpp/absl/types/optional.h", "+third_party/abseil-cpp/absl/types/optional.h",
"+third_party/abseil-cpp/absl/types/variant.h", "+third_party/abseil-cpp/absl/types/variant.h",
],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
] ]
} }

View File

@ -34,7 +34,7 @@ AddressPoolManager& AddressPoolManager::GetInstance() {
return singleton_; return singleton_;
} }
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace { namespace {
@ -77,8 +77,9 @@ uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
} }
void AddressPoolManager::ResetForTesting() { void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i) for (size_t i = 0; i < std::size(aligned_pools_.pools_); ++i) {
aligned_pools_.pools_[i].Reset(); aligned_pools_.pools_[i].Reset();
}
} }
void AddressPoolManager::Remove(pool_handle handle) { void AddressPoolManager::Remove(pool_handle handle) {
@ -102,7 +103,7 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle, void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address, uintptr_t address,
size_t length) { size_t length) {
PA_DCHECK(0 < handle && handle <= kNumPools); PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
Pool* pool = GetPool(handle); Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized()); PA_DCHECK(pool->IsInitialized());
DecommitPages(address, length); DecommitPages(address, length);
@ -299,7 +300,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true; return true;
} }
#else // PA_CONFIG(HAS_64_BITS_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert( static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
@ -531,7 +532,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true; return true;
} }
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) { void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{}; AddressSpaceStats stats{};

View File

@ -15,8 +15,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h" #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -53,7 +53,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
AddressPoolManager(const AddressPoolManager&) = delete; AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete; AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length); void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle); void Remove(pool_handle handle);
@ -63,7 +63,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// Return the base address of a pool. // Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle); uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// Reserves address space from the pool. // Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle, uintptr_t Reserve(pool_handle handle,
@ -76,7 +76,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
size_t length); size_t length);
void ResetForTesting(); void ResetForTesting();
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size); void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size); void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
@ -87,7 +87,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
static bool IsManagedByBRPPool(uintptr_t address) { static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address); return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
} }
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper); void DumpStats(AddressSpaceStatsDumper* dumper);
@ -107,7 +107,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// if PartitionAlloc is wholly unused in this process.) // if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats); bool GetStats(AddressSpaceStats* stats);
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
class Pool { class Pool {
public: public:
constexpr Pool() = default; constexpr Pool() = default;
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
}; };
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) { PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools); PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
return &aligned_pools_.pools_[handle - 1]; return &aligned_pools_.pools_[handle - 1];
} }
@ -168,7 +168,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {}; char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {};
} aligned_pools_ PA_PKEY_ALIGN; } aligned_pools_ PA_PKEY_ALIGN;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_; static PA_CONSTINIT AddressPoolManager singleton_;
}; };

View File

@ -7,7 +7,7 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -34,4 +34,4 @@ std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)

View File

@ -14,12 +14,11 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h" #include "build/build_config.h"
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc { namespace partition_alloc {
@ -185,6 +184,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc } // namespace partition_alloc
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

View File

@ -7,7 +7,7 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
using pool_handle = unsigned; enum pool_handle : unsigned;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -4,8 +4,8 @@
#include "base/allocator/partition_allocator/address_space_randomization.h" #include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/random.h" #include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -18,7 +18,7 @@ namespace partition_alloc {
uintptr_t GetRandomPageBase() { uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue()); uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
random <<= 32ULL; random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue()); random |= static_cast<uintptr_t>(internal::RandomValue());
@ -26,7 +26,7 @@ uintptr_t GetRandomPageBase() {
// OS and build configuration. // OS and build configuration.
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#else // PA_CONFIG(HAS_64_BITS_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes // On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the // excessive fragmentation. Plus most of these systems lack ASLR, so the
@ -40,7 +40,7 @@ uintptr_t GetRandomPageBase() {
#endif // BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random; return random;

View File

@ -9,7 +9,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
namespace partition_alloc { namespace partition_alloc {
@ -20,9 +19,9 @@ struct PoolStats {
// On 32-bit, pools are mainly logical entities, intermingled with // On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available // allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case. // reservation" is not possible to measure in that case.
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
size_t largest_available_reservation = 0; size_t largest_available_reservation = 0;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif
}; };
struct AddressSpaceStats { struct AddressSpaceStats {
@ -30,14 +29,14 @@ struct AddressSpaceStats {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PoolStats brp_pool_stats; PoolStats brp_pool_stats;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
PoolStats configurable_pool_stats; PoolStats configurable_pool_stats;
#else #else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t blocklist_size; // measured in super pages size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count; size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
PoolStats pkey_pool_stats; PoolStats pkey_pool_stats;
#endif #endif

View File

@ -14,6 +14,6 @@ use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = false put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false enable_dangling_raw_ptr_checks_default = false

View File

@ -6,20 +6,9 @@ standalone repository for PartitionAlloc is hosted
## GN Args ## GN Args
External clients mainly need to set these six GN args: External clients should examine the args described in
`build_overrides/partition_alloc.gni` and add them in their own source
``` none tree. PartitionAlloc's build will expect them at
# These are blocked on PA-E and `raw_ptr.h` and can never be true until
# we make them part of the standalone PA distribution.
use_partition_alloc_as_malloc_default = false
enable_mte_checked_ptr_support_default = false
enable_backup_ref_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
```
PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`. `//build_overrides/partition_alloc.gni`.
In addition, something must provide `build_with_chromium = false` to In addition, something must provide `build_with_chromium = false` to

View File

@ -136,8 +136,18 @@ bool UseMapJit() {
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) == return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue; kCFBooleanTrue;
} }
#endif // BUILDFLAG(IS_MAC) #elif BUILDFLAG(IS_IOS)
bool UseMapJit() {
// Always enable MAP_JIT in simulator as it is supported unconditionally.
#if TARGET_IPHONE_SIMULATOR
return true;
#else
// TODO(https://crbug.com/1413818): Fill this out when the API it is
// available.
return false;
#endif // TARGET_IPHONE_SIMULATOR
}
#endif // BUILDFLAG(IS_IOS)
} // namespace } // namespace
// |mmap| uses a nearby address if the hint address is blocked. // |mmap| uses a nearby address if the hint address is blocked.
@ -166,7 +176,7 @@ uintptr_t SystemAllocPagesInternal(uintptr_t hint,
int access_flag = GetAccessFlags(accessibility); int access_flag = GetAccessFlags(accessibility);
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE; int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_APPLE)
// On macOS 10.14 and higher, executables that are code signed with the // On macOS 10.14 and higher, executables that are code signed with the
// "runtime" option cannot execute writable memory by default. They can opt // "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit" // into this capability by specifying the "com.apple.security.cs.allow-jit"
@ -369,7 +379,6 @@ bool TryRecommitSystemPagesInternal(
} }
void DiscardSystemPagesInternal(uintptr_t address, size_t length) { void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
#if !BUILDFLAG(IS_NACL)
void* ptr = reinterpret_cast<void*>(address); void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
int ret = madvise(ptr, length, MADV_FREE_REUSABLE); int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
@ -378,7 +387,7 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
ret = madvise(ptr, length, MADV_DONTNEED); ret = madvise(ptr, length, MADV_DONTNEED);
} }
PA_PCHECK(ret == 0); PA_PCHECK(ret == 0);
#else #else // BUILDFLAG(IS_APPLE)
// We have experimented with other flags, but with suboptimal results. // We have experimented with other flags, but with suboptimal results.
// //
// MADV_FREE (Linux): Makes our memory measurements less predictable; // MADV_FREE (Linux): Makes our memory measurements less predictable;
@ -391,8 +400,7 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
return; return;
} }
PA_PCHECK(ret == 0); PA_PCHECK(ret == 0);
#endif #endif // BUILDFLAG(IS_APPLE)
#endif // !BUILDFLAG(IS_NACL)
} }
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -10,8 +10,8 @@
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h" #include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h" #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -28,7 +28,7 @@ bool IsOutOfMemory(DWORD error) {
case ERROR_COMMITMENT_MINIMUM: case ERROR_COMMITMENT_MINIMUM:
// Page file is too small. // Page file is too small.
case ERROR_COMMITMENT_LIMIT: case ERROR_COMMITMENT_LIMIT:
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// Not enough memory resources are available to process this command. // Not enough memory resources are available to process this command.
// //
// It is not entirely clear whether this error pertains to out of address // It is not entirely clear whether this error pertains to out of address

View File

@ -16,6 +16,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -36,7 +37,7 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace { namespace {
@ -422,6 +423,6 @@ PageCharacteristics page_characteristics;
#endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64) #endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -26,7 +26,7 @@
#include "build/build_config.h" #include "build/build_config.h"
// The feature is not applicable to 32-bit address space. // The feature is not applicable to 32-bit address space.
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc { namespace partition_alloc {
@ -52,7 +52,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!IsInBRPPool(address)); PA_DCHECK(!IsInBRPPool(address));
#endif #endif
pool_handle pool = 0; pool_handle pool = kNullPoolHandle;
uintptr_t base = 0; uintptr_t base = 0;
if (IsInRegularPool(address)) { if (IsInRegularPool(address)) {
pool = kRegularPoolHandle; pool = kRegularPoolHandle;
@ -475,6 +475,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc } // namespace partition_alloc
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

View File

@ -14,7 +14,6 @@
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h" #include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
@ -105,16 +104,6 @@ void PartitionAllocGlobalUninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
internal::PartitionAddressSpace::UninitPkeyPoolForTesting(); internal::PartitionAddressSpace::UninitPkeyPoolForTesting();
#endif #endif
#if BUILDFLAG(USE_STARSCAN)
internal::PCScan::UninitForTesting(); // IN-TEST
#endif
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(HAS_64_BITS_POINTERS)
internal::PartitionAddressSpace::UninitForTesting();
#else
internal::AddressPoolManager::GetInstance().ResetForTesting();
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
internal::g_oom_handling_function = nullptr; internal::g_oom_handling_function = nullptr;
} }

View File

@ -5,6 +5,23 @@
import("//build/config/sanitizers/sanitizers.gni") import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni") import("//build_overrides/partition_alloc.gni")
if (is_apple) {
import("//build/config/features.gni")
}
# Whether 64-bit pointers are used.
# A static_assert in partition_alloc_config.h verifies that.
if (is_nacl) {
# NaCl targets don't use 64-bit pointers.
has_64_bit_pointers = false
} else if (current_cpu == "x64" || current_cpu == "arm64" || current_cpu == "mips64el") {
has_64_bit_pointers = true
} else if (current_cpu == "x86" || current_cpu == "arm" || current_cpu == "mipsel") {
has_64_bit_pointers = false
} else {
assert(false, "Unknown CPU: $current_cpu")
}
if (use_partition_alloc_as_malloc_default) { if (use_partition_alloc_as_malloc_default) {
_default_allocator = "partition" _default_allocator = "partition"
} else { } else {
@ -119,6 +136,14 @@ declare_args() {
enable_dangling_raw_ptr_checks = enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enable the feature flags required to check for dangling pointers. That is to
# say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
#
# This is meant to be used on bots only. It is much easier to override the
# feature flags using a binary flag instead of updating multiple bots's
# scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flags_for_bots = false
# Enables the dangling raw_ptr checks feature for the performance experiment. # Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointers have been fixed or annotated yet. To avoid # Not every dangling pointers have been fixed or annotated yet. To avoid
# accounting for the cost of calling the PA's embedder's callbacks when a # accounting for the cost of calling the PA's embedder's callbacks when a
@ -136,10 +161,20 @@ declare_args() {
# Shadow metadata is still under development and only supports Linux # Shadow metadata is still under development and only supports Linux
# for now. # for now.
enable_shadow_metadata = false enable_shadow_metadata = false
if (is_apple) {
# use_blink currently assumes mach absolute ticks (eg, to ensure trace
# events cohere).
partition_alloc_enable_mach_absolute_time_ticks = is_mac || use_blink
}
} }
# *Scan is currently only used by Chromium. # *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium use_starscan = build_with_chromium && has_64_bit_pointers
pcscan_stack_supported =
use_starscan && (current_cpu == "x64" || current_cpu == "x86" ||
current_cpu == "arm" || current_cpu == "arm64")
# We want to provide assertions that guard against inconsistent build # We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building # args, but there is no point in having them fire if we're not building
@ -223,6 +258,14 @@ assert(!use_asan_backup_ref_ptr || is_asan,
assert(!use_asan_unowned_ptr || is_asan, assert(!use_asan_unowned_ptr || is_asan,
"AsanUnownedPtr requires AddressSanitizer") "AsanUnownedPtr requires AddressSanitizer")
if (is_apple) {
assert(!use_blink || partition_alloc_enable_mach_absolute_time_ticks,
"use_blink requires partition_alloc_enable_mach_absolute_time_ticks")
assert(!is_mac || partition_alloc_enable_mach_absolute_time_ticks,
"mac requires partition_alloc_enable_mach_absolute_time_ticks")
}
# AsanBackupRefPtr is not supported outside Chromium. The implementation is # AsanBackupRefPtr is not supported outside Chromium. The implementation is
# entangled with `//base`. The code is only physically located with the # entangled with `//base`. The code is only physically located with the
# rest of `raw_ptr` to keep it together. # rest of `raw_ptr` to keep it together.

View File

@ -137,7 +137,7 @@
#endif #endif
// MemorySanitizer annotations. // MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !BUILDFLAG(IS_NACL) #if defined(MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h> #include <sanitizer/msan_interface.h>
// Mark a memory region fully initialized. // Mark a memory region fully initialized.

View File

@ -41,13 +41,7 @@
#if defined(COMPILER_GCC) #if defined(COMPILER_GCC)
#if BUILDFLAG(IS_NACL) #if defined(ARCH_CPU_X86_FAMILY)
// Crash report accuracy is not guaranteed on NaCl.
#define PA_TRAP_SEQUENCE1_() __builtin_trap()
#define PA_TRAP_SEQUENCE2_() asm volatile("")
#elif defined(ARCH_CPU_X86_FAMILY)
// TODO(https://crbug.com/958675): In theory, it should be possible to use just // TODO(https://crbug.com/958675): In theory, it should be possible to use just
// int3. However, there are a number of crashes with SIGILL as the exception // int3. However, there are a number of crashes with SIGILL as the exception

View File

@ -12,7 +12,7 @@
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {
#if defined(__GLIBC__) || BUILDFLAG(IS_NACL) #if defined(__GLIBC__)
#define USE_HISTORICAL_STRERROR_R 1 #define USE_HISTORICAL_STRERROR_R 1
// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE // Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
// is defined, but the symbol is renamed to __gnu_strerror_r which only exists // is defined, but the symbol is renamed to __gnu_strerror_r which only exists

View File

@ -18,7 +18,7 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL) #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include "third_party/lss/linux_syscall_support.h" #include "third_party/lss/linux_syscall_support.h"
#elif BUILDFLAG(IS_MAC) #elif BUILDFLAG(IS_MAC)
// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK. // TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
@ -68,7 +68,7 @@ namespace partition_alloc::internal::base {
// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land // (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
// it or some form of it. // it or some form of it.
void RandBytes(void* output, size_t output_length) { void RandBytes(void* output, size_t output_length) {
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL) #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// We have to call `getrandom` via Linux Syscall Support, rather than through // We have to call `getrandom` via Linux Syscall Support, rather than through
// the libc wrapper, because we might not have an up-to-date libc (e.g. on // the libc wrapper, because we might not have an up-to-date libc (e.g. on
// some bots). // some bots).

View File

@ -116,11 +116,6 @@ PlatformThreadId PlatformThread::CurrentId() {
return zx_thread_self(); return zx_thread_self();
#elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX) #elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX)
return pthread_self(); return pthread_self();
#elif BUILDFLAG(IS_NACL) && defined(__GLIBC__)
return pthread_self();
#elif BUILDFLAG(IS_NACL) && !defined(__GLIBC__)
// Pointers are 32-bits in NaCl.
return reinterpret_cast<int32_t>(pthread_self());
#elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX) #elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX)
return pthread_self(); return pthread_self();
#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX) #elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX)

View File

@ -26,7 +26,7 @@
#include <sys/resource.h> #include <sys/resource.h>
#endif #endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif #endif
@ -52,16 +52,14 @@ void* ThreadFunc(void* params) {
delegate = thread_params->delegate; delegate = thread_params->delegate;
#if !BUILDFLAG(IS_NACL) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PCScan::NotifyThreadCreated(GetStackPointer()); PCScan::NotifyThreadCreated(GetStackPointer());
#endif
#endif #endif
} }
delegate->ThreadMain(); delegate->ThreadMain();
#if !BUILDFLAG(IS_NACL) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadDestroyed(); PCScan::NotifyThreadDestroyed();
#endif #endif

View File

@ -14,7 +14,7 @@
#include <windows.h> #include <windows.h>
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif #endif
@ -62,7 +62,7 @@ DWORD __stdcall ThreadFunc(void* params) {
GetCurrentProcess(), &platform_handle, 0, GetCurrentProcess(), &platform_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS); FALSE, DUPLICATE_SAME_ACCESS);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadCreated(GetStackPointer()); PCScan::NotifyThreadCreated(GetStackPointer());
#endif #endif
@ -74,7 +74,7 @@ DWORD __stdcall ThreadFunc(void* params) {
delete thread_params; delete thread_params;
delegate->ThreadMain(); delegate->ThreadMain();
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadDestroyed(); PCScan::NotifyThreadDestroyed();
#endif #endif
return 0; return 0;

View File

@ -75,6 +75,10 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_FUCHSIA) #if BUILDFLAG(IS_FUCHSIA)
#include <zircon/types.h> #include <zircon/types.h>
#endif #endif
@ -136,9 +140,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeDelta {
#if BUILDFLAG(IS_FUCHSIA) #if BUILDFLAG(IS_FUCHSIA)
static TimeDelta FromZxDuration(zx_duration_t nanos); static TimeDelta FromZxDuration(zx_duration_t nanos);
#endif #endif
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeDelta FromMachTime(uint64_t mach_time); static TimeDelta FromMachTime(uint64_t mach_time);
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
// Converts an integer value representing TimeDelta to a class. This is used // Converts an integer value representing TimeDelta to a class. This is used
// when deserializing a |TimeDelta| structure, using a value known to be // when deserializing a |TimeDelta| structure, using a value known to be
@ -879,14 +885,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks
static TimeTicks FromQPCValue(LONGLONG qpc_value); static TimeTicks FromQPCValue(LONGLONG qpc_value);
#endif #endif
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time); static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
// Sets the current Mach timebase to `timebase`. Returns the old timebase. // Sets the current Mach timebase to `timebase`. Returns the old timebase.
static mach_timebase_info_data_t SetMachTimebaseInfoForTesting( static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
mach_timebase_info_data_t timebase); mach_timebase_info_data_t timebase);
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
// Converts to TimeTicks the value obtained from SystemClock.uptimeMillis(). // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
@ -979,7 +987,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadTicks
// Returns true if ThreadTicks::Now() is supported on this system. // Returns true if ThreadTicks::Now() is supported on this system.
[[nodiscard]] static bool IsSupported() { [[nodiscard]] static bool IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \ #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
return true; return true;
#elif BUILDFLAG(IS_WIN) #elif BUILDFLAG(IS_WIN)
return IsSupportedWin(); return IsSupportedWin();

View File

@ -14,18 +14,21 @@
#include <sys/types.h> #include <sys/types.h>
#include <time.h> #include <time.h>
#if BUILDFLAG(IS_IOS)
#include <errno.h>
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h" #include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h" #include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h" #include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {
namespace { namespace {
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns a pointer to the initialized Mach timebase info struct. // Returns a pointer to the initialized Mach timebase info struct.
mach_timebase_info_data_t* MachTimebaseInfo() { mach_timebase_info_data_t* MachTimebaseInfo() {
static mach_timebase_info_data_t timebase_info = []() { static mach_timebase_info_data_t timebase_info = []() {
@ -78,48 +81,32 @@ int64_t MachTimeToMicroseconds(uint64_t mach_time) {
// 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277). // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
return checked_cast<int64_t>(microseconds); return checked_cast<int64_t>(microseconds);
} }
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns monotonically growing number of ticks in microseconds since some // Returns monotonically growing number of ticks in microseconds since some
// unspecified starting point. // unspecified starting point.
int64_t ComputeCurrentTicks() { int64_t ComputeCurrentTicks() {
#if BUILDFLAG(IS_IOS) #if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// iOS 10 supports clock_gettime(CLOCK_MONOTONIC, ...), which is struct timespec tp;
// around 15 times faster than sysctl() call. Use it if possible; // clock_gettime() returns 0 on success and -1 on failure. Failure can only
// otherwise, fall back to sysctl(). // happen because of bad arguments (unsupported clock type or timespec
if (__builtin_available(iOS 10, *)) { // pointer out of accessible address space). Here it is known that neither
struct timespec tp; // can happen since the timespec parameter is stack allocated right above and
if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) { // `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is
return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000; // supported on.
} int res = clock_gettime(CLOCK_MONOTONIC, &tp);
} PA_DCHECK(0 == res) << "Failed clock_gettime, errno: " << errno;
// On iOS mach_absolute_time stops while the device is sleeping. Instead use return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
// now - KERN_BOOTTIME to get a time difference that is not impacted by clock
// changes. KERN_BOOTTIME will be updated by the system whenever the system
// clock change.
struct timeval boottime;
int mib[2] = {CTL_KERN, KERN_BOOTTIME};
size_t size = sizeof(boottime);
int kr = sysctl(mib, std::size(mib), &boottime, &size, nullptr, 0);
PA_DCHECK(KERN_SUCCESS == kr);
TimeDelta time_difference =
subtle::TimeNowIgnoringOverride() -
(Time::FromTimeT(boottime.tv_sec) + Microseconds(boottime.tv_usec));
return time_difference.InMicroseconds();
#else #else
// mach_absolute_time is it when it comes to ticks on the Mac. Other calls // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
// with less precision (such as TickCount) just call through to // with less precision (such as TickCount) just call through to
// mach_absolute_time. // mach_absolute_time.
return MachTimeToMicroseconds(mach_absolute_time()); return MachTimeToMicroseconds(mach_absolute_time());
#endif // BUILDFLAG(IS_IOS) #endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
} }
int64_t ComputeThreadTicks() { int64_t ComputeThreadTicks() {
#if BUILDFLAG(IS_IOS)
PA_NOTREACHED();
return 0;
#else
// The pthreads library keeps a cached reference to the thread port, which // The pthreads library keeps a cached reference to the thread port, which
// does not have to be released like mach_thread_self() does. // does not have to be released like mach_thread_self() does.
mach_port_t thread_port = pthread_mach_thread_np(pthread_self()); mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
@ -142,7 +129,6 @@ int64_t ComputeThreadTicks() {
absolute_micros += (thread_info_data.user_time.microseconds + absolute_micros += (thread_info_data.user_time.microseconds +
thread_info_data.system_time.microseconds); thread_info_data.system_time.microseconds);
return absolute_micros.ValueOrDie(); return absolute_micros.ValueOrDie();
#endif // BUILDFLAG(IS_IOS)
} }
} // namespace } // namespace
@ -200,12 +186,12 @@ NSDate* Time::ToNSDate() const {
// TimeDelta ------------------------------------------------------------------ // TimeDelta ------------------------------------------------------------------
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static // static
TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) { TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
return Microseconds(MachTimeToMicroseconds(mach_time)); return Microseconds(MachTimeToMicroseconds(mach_time));
} }
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// TimeTicks ------------------------------------------------------------------ // TimeTicks ------------------------------------------------------------------
@ -225,7 +211,7 @@ bool TimeTicks::IsConsistentAcrossProcesses() {
return true; return true;
} }
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static // static
TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) { TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
return TimeTicks(MachTimeToMicroseconds(mach_absolute_time)); return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
@ -241,15 +227,15 @@ mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
return orig_timebase; return orig_timebase;
} }
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static // static
TimeTicks::Clock TimeTicks::GetClock() { TimeTicks::Clock TimeTicks::GetClock() {
#if BUILDFLAG(IS_IOS) #if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME; return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
#else #else
return Clock::MAC_MACH_ABSOLUTE_TIME; return Clock::MAC_MACH_ABSOLUTE_TIME;
#endif // BUILDFLAG(IS_IOS) #endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
} }
// ThreadTicks ---------------------------------------------------------------- // ThreadTicks ----------------------------------------------------------------

View File

@ -23,37 +23,25 @@
// 4. Do not use PA_CONFIG() when defining config macros, or it will lead to // 4. Do not use PA_CONFIG() when defining config macros, or it will lead to
// recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly. // recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly.
// 5. Try to use constexpr instead of macros wherever possible. // 5. Try to use constexpr instead of macros wherever possible.
// TODO(bartekn): Convert macros to constexpr as much as possible. // TODO(bartekn): Convert macros to constexpr or BUILDFLAG as much as possible.
#define PA_CONFIG(flag) (PA_CONFIG_##flag()) #define PA_CONFIG(flag) (PA_CONFIG_##flag())
// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit // Assert that the heuristic in partition_alloc.gni is accurate on supported
// address space. The only known case where address space is 32-bit is NaCl, so // configurations.
// eliminate it explicitly. static_assert below ensures that others won't slip #if BUILDFLAG(HAS_64_BIT_POINTERS)
// through.
#define PA_CONFIG_HAS_64_BITS_POINTERS() \
(defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL))
#if PA_CONFIG(HAS_64_BITS_POINTERS)
static_assert(sizeof(void*) == 8, ""); static_assert(sizeof(void*) == 8, "");
#else #else
static_assert(sizeof(void*) != 8, ""); static_assert(sizeof(void*) != 8, "");
#endif #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
// PCScan supports 64 bits only and is disabled outside Chromium. #if BUILDFLAG(HAS_64_BIT_POINTERS) && \
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#define PA_CONFIG_ALLOW_PCSCAN() 1
#else
#define PA_CONFIG_ALLOW_PCSCAN() 0
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && \
(defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP) (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1 #define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
#else #else
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0 #define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
#endif #endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS) #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
// Allow PA to select an alternate pool size at run-time before initialization, // Allow PA to select an alternate pool size at run-time before initialization,
// rather than using a single constexpr value. // rather than using a single constexpr value.
// //
@ -64,19 +52,19 @@ static_assert(sizeof(void*) != 8, "");
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1 #define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1
#else #else
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0 #define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
// Puts the regular and BRP pools right next to each other, so that we can // Puts the regular and BRP pools right next to each other, so that we can
// check "belongs to one of the two pools" with a single bitmask operation. // check "belongs to one of the two pools" with a single bitmask operation.
// //
// This setting is specific to 64-bit, as 32-bit has a different implementation. // This setting is specific to 64-bit, as 32-bit has a different implementation.
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS) #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS)
#define PA_CONFIG_GLUE_CORE_POOLS() 1 #define PA_CONFIG_GLUE_CORE_POOLS() 1
#else #else
#define PA_CONFIG_GLUE_CORE_POOLS() 0 #define PA_CONFIG_GLUE_CORE_POOLS() 0
#endif #endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && \ #if BUILDFLAG(HAS_64_BIT_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)) (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#include <linux/version.h> #include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS. // TODO(bikineev): Enable for ChromeOS.
@ -84,10 +72,10 @@ static_assert(sizeof(void*) != 8, "");
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)) (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
#else #else
#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0 #define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && #endif // BUILDFLAG(HAS_64_BIT_POINTERS) &&
// (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)) // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
// Use card table to avoid races for PCScan configuration without safepoints. // Use card table to avoid races for PCScan configuration without safepoints.
// The card table provides the guaranteee that for a marked card the underling // The card table provides the guaranteee that for a marked card the underling
// super-page is fully initialized. // super-page is fully initialized.
@ -95,11 +83,7 @@ static_assert(sizeof(void*) != 8, "");
#else #else
// The card table is permanently disabled for 32-bit. // The card table is permanently disabled for 32-bit.
#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0 #define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE) && !PA_CONFIG(ALLOW_PCSCAN)
#error "Card table can only be used when *Scan is allowed"
#endif
// Use batched freeing when sweeping pages. This builds up a freelist in the // Use batched freeing when sweeping pages. This builds up a freelist in the
// scanner thread and appends to the slot-span's freelist only once. // scanner thread and appends to the slot-span's freelist only once.
@ -184,7 +168,7 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8); static_assert(sizeof(void*) == 8);
#endif #endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#define PA_CONFIG_USE_OOB_POISON() 1 #define PA_CONFIG_USE_OOB_POISON() 1
#else #else
#define PA_CONFIG_USE_OOB_POISON() 0 #define PA_CONFIG_USE_OOB_POISON() 0
@ -195,7 +179,7 @@ static_assert(sizeof(void*) == 8);
// Only applicable to code with 64-bit pointers. Currently conflicts with true // Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE. // hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \ #if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
PA_CONFIG(HAS_64_BITS_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING) BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8); static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1 #define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else #else
@ -321,7 +305,7 @@ constexpr bool kUseLazyCommit = false;
// This feature is only enabled with 64-bit environment because pools work // This feature is only enabled with 64-bit environment because pools work
// differently with 32-bits pointers (see glossary). // differently with 32-bits pointers (see glossary).
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \ #if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
PA_CONFIG(HAS_64_BITS_POINTERS) BUILDFLAG(HAS_64_BIT_POINTERS)
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 1 #define PA_CONFIG_ENABLE_SHADOW_METADATA() 1
#else #else
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 0 #define PA_CONFIG_ENABLE_SHADOW_METADATA() 0
@ -340,7 +324,7 @@ constexpr bool kUseLazyCommit = false;
// Enables compressed (4-byte) pointers that can point within the core pools // Enables compressed (4-byte) pointers that can point within the core pools
// (Regular + BRP). // (Regular + BRP).
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 1 #define PA_CONFIG_POINTER_COMPRESSION() 1
#if !PA_CONFIG(GLUE_CORE_POOLS) #if !PA_CONFIG(GLUE_CORE_POOLS)
@ -354,7 +338,7 @@ constexpr bool kUseLazyCommit = false;
// TODO(1376980): Address MTE once it's enabled. // TODO(1376980): Address MTE once it's enabled.
#error "Compressed pointers don't support tag in the upper bits" #error "Compressed pointers don't support tag in the upper bits"
#endif #endif
#else // PA_CONFIG(HAS_64_BITS_POINTERS) && #else // BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_POINTER_COMPRESSION) // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 0 #define PA_CONFIG_POINTER_COMPRESSION() 0
#endif #endif

View File

@ -13,6 +13,7 @@
#include "base/allocator/partition_allocator/address_pool_manager_types.h" #include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -260,12 +261,29 @@ constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask; constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// PartitionAlloc's address space is split into pools. See `glossary.md`. // PartitionAlloc's address space is split into pools. See `glossary.md`.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS) enum pool_handle : unsigned {
constexpr size_t kNumPools = 4; kNullPoolHandle = 0u,
#else
constexpr size_t kNumPools = 3; kRegularPoolHandle,
kBRPPoolHandle,
#if BUILDFLAG(HAS_64_BIT_POINTERS)
kConfigurablePoolHandle,
#endif #endif
// New pool_handles will be added here.
#if BUILDFLAG(ENABLE_PKEYS)
// The pkey pool must come last since we pkey_mprotect its entry in the
// metadata tables, e.g. AddressPoolManager::aligned_pools_
kPkeyPoolHandle,
#endif
kMaxPoolHandle
};
// kNullPoolHandle doesn't have metadata, hence - 1
constexpr size_t kNumPools = kMaxPoolHandle - 1;
// Maximum pool size. With exception of Configurable Pool, it is also // Maximum pool size. With exception of Configurable Pool, it is also
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which // the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// allows to choose a different size at initialization time for certain // allows to choose a different size at initialization time for certain
@ -277,22 +295,18 @@ constexpr size_t kNumPools = 3;
// //
// When pointer compression is enabled, we cannot use large pools (at most // When pointer compression is enabled, we cannot use large pools (at most
// 8GB for each of the glued pools). // 8GB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION)
constexpr size_t kPoolMaxSize = 8 * kGiB; constexpr size_t kPoolMaxSize = 8 * kGiB;
#else #else
constexpr size_t kPoolMaxSize = 16 * kGiB; constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif #endif
#else // PA_CONFIG(HAS_64_BITS_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr size_t kNumPools = 2;
constexpr size_t kPoolMaxSize = 4 * kGiB; constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif #endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize; constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
static constexpr pool_handle kPkeyPoolHandle = 4;
static_assert( static_assert(
kPkeyPoolHandle == kNumPools, kPkeyPoolHandle == kNumPools,
"The pkey pool must come last since we pkey_mprotect its metadata."); "The pkey pool must come last since we pkey_mprotect its metadata.");
@ -327,7 +341,7 @@ constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool; return kMaxSuperPagesInPool;
} }
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size, // In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools. // because this is the reservation granularity of the pools.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
@ -337,7 +351,7 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift; return kSuperPageShift;
} }
#else // PA_CONFIG(HAS_64_BITS_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
// In 32-bit mode, address space is space is a scarce resource. Use the system // In 32-bit mode, address space is space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation // allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps // unit. However, don't go below partition page size, so that pool bitmaps
@ -351,7 +365,7 @@ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() { DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift()); return std::max(PageAllocationGranularityShift(), PartitionPageShift());
} }
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() { DirectMapAllocationGranularityOffsetMask() {

View File

@ -0,0 +1,50 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#include "base/allocator/partition_allocator/partition_alloc.h"
namespace partition_alloc {
namespace internal {
constexpr bool AllowLeaks = true;
constexpr bool DisallowLeaks = false;
// A subclass of PartitionAllocator for testing. It will free all resources,
// i.e. allocated memory, memory inside freelist, and so on, when destructing
// it or when manually invoking reset().
// If need to check if there are any memory allocated but not freed yet,
// use allow_leaks=false. We will see CHECK failure inside reset() if any
// leak is detected. Otherwise (e.g. intentional leaks), use allow_leaks=true.
template <bool thread_safe, bool allow_leaks>
struct PartitionAllocatorForTesting : public PartitionAllocator<thread_safe> {
PartitionAllocatorForTesting() : PartitionAllocator<thread_safe>() {}
explicit PartitionAllocatorForTesting(PartitionOptions opts)
: PartitionAllocator<thread_safe>() {
PartitionAllocator<thread_safe>::init(opts);
}
~PartitionAllocatorForTesting() { reset(); }
PA_ALWAYS_INLINE void reset() {
PartitionAllocator<thread_safe>::root()->ResetForTesting(allow_leaks);
}
};
} // namespace internal
using PartitionAllocatorForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::DisallowLeaks>;
using PartitionAllocatorAllowLeaksForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::AllowLeaks>;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_

View File

@ -32,6 +32,8 @@ std::atomic<PartitionAllocHooks::FreeOverrideHook*>
PartitionAllocHooks::free_override_hook_(nullptr); PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*> std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
PartitionAllocHooks::realloc_override_hook_(nullptr); PartitionAllocHooks::realloc_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::QuarantineOverrideHook*>
PartitionAllocHooks::quarantine_override_hook_(nullptr);
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook, void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook) { FreeObserverHook* free_hook) {
@ -118,4 +120,9 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
return false; return false;
} }
void PartitionAllocHooks::SetQuarantineOverrideHook(
QuarantineOverrideHook* hook) {
quarantine_override_hook_.store(hook, std::memory_order_release);
}
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -8,6 +8,7 @@
#include <atomic> #include <atomic>
#include <cstddef> #include <cstddef>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc { namespace partition_alloc {
@ -34,6 +35,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
// the size of the underlying allocation. // the size of the underlying allocation.
typedef bool ReallocOverrideHook(size_t* out, void* address); typedef bool ReallocOverrideHook(size_t* out, void* address);
// Special hook type, independent of the rest. Triggered when `free()` detects
// outstanding references to the allocation.
// IMPORTANT: Make sure the hook always overwrites `[address, address + size)`
// with a bit pattern that cannot be interpreted as a valid memory address.
typedef void QuarantineOverrideHook(void* address, size_t size);
// To unhook, call Set*Hooks with nullptrs. // To unhook, call Set*Hooks with nullptrs.
static void SetObserverHooks(AllocationObserverHook* alloc_hook, static void SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook); FreeObserverHook* free_hook);
@ -65,6 +72,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
const char* type_name); const char* type_name);
static bool ReallocOverrideHookIfEnabled(size_t* out, void* address); static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
PA_ALWAYS_INLINE static QuarantineOverrideHook* GetQuarantineOverrideHook() {
return quarantine_override_hook_.load(std::memory_order_acquire);
}
static void SetQuarantineOverrideHook(QuarantineOverrideHook* hook);
private: private:
// Single bool that is used to indicate whether observer or allocation hooks // Single bool that is used to indicate whether observer or allocation hooks
// are set to reduce the numbers of loads required to check whether hooking is // are set to reduce the numbers of loads required to check whether hooking is
@ -78,6 +91,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
static std::atomic<AllocationOverrideHook*> allocation_override_hook_; static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
static std::atomic<FreeOverrideHook*> free_override_hook_; static std::atomic<FreeOverrideHook*> free_override_hook_;
static std::atomic<ReallocOverrideHook*> realloc_override_hook_; static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
static std::atomic<QuarantineOverrideHook*> quarantine_override_hook_;
}; };
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -38,7 +38,7 @@
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/state_bitmap.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif #endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -74,7 +74,7 @@ template <bool thread_safe>
PA_IMMEDIATE_CRASH(); // Not required, kept as documentation. PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
} }
#if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means // |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
// that a partial super page is allowed at the end. Since the block list uses // that a partial super page is allowed at the end. Since the block list uses
// kSuperPageSize granularity, a partial super page is considered blocked if // kSuperPageSize granularity, a partial super page is considered blocked if
@ -93,7 +93,7 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
} }
return true; return true;
} }
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) && #endif // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// Reserves |requested_size| worth of super pages from the specified pool. // Reserves |requested_size| worth of super pages from the specified pool.
@ -123,7 +123,7 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
// In 32-bit mode, when allocating from BRP pool, verify that the requested // In 32-bit mode, when allocating from BRP pool, verify that the requested
// allocation honors the block list. Find a better address otherwise. // allocation honors the block list. Find a better address otherwise.
#if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) { if (pool == kBRPPoolHandle) {
constexpr int kMaxRandomAddressTries = 10; constexpr int kMaxRandomAddressTries = 10;
for (int i = 0; i < kMaxRandomAddressTries; ++i) { for (int i = 0; i < kMaxRandomAddressTries; ++i) {
@ -172,10 +172,10 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
reserved_address = 0; reserved_address = 0;
} }
} }
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) && #endif // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
// Only mark the region as belonging to the pool after it has passed the // Only mark the region as belonging to the pool after it has passed the
// blocklist check in order to avoid a potential race with destructing a // blocklist check in order to avoid a potential race with destructing a
// raw_ptr<T> object that points to non-PA memory in another thread. // raw_ptr<T> object that points to non-PA memory in another thread.
@ -284,7 +284,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{ {
// Reserving memory from the pool is actually not a syscall on 64 bit // Reserving memory from the pool is actually not a syscall on 64 bit
// platforms. // platforms.
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
#endif #endif
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size); reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
@ -434,7 +434,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{ {
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start, AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size); reservation_size);
#endif #endif

View File

@ -9,8 +9,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -39,13 +39,13 @@ constexpr size_t OrderSubIndexMask(uint8_t order) {
(kNumBucketsPerOrderBits + 1); (kNumBucketsPerOrderBits + 1);
} }
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
#define PA_BITS_PER_SIZE_T 64 #define PA_BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, ""); static_assert(kBitsPerSizeT == 64, "");
#else #else
#define PA_BITS_PER_SIZE_T 32 #define PA_BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, ""); static_assert(kBitsPerSizeT == 32, "");
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = { inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2), OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2),

View File

@ -249,9 +249,7 @@ class PartitionFreelistEntry {
(next_address & kSuperPageBaseMask); (next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
bool marked_as_free_in_bitmap = bool marked_as_free_in_bitmap =
for_thread_cache for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
? true
: !FreeSlotBitmapSlotIsUsed(reinterpret_cast<uintptr_t>(next));
#else #else
bool marked_as_free_in_bitmap = true; bool marked_as_free_in_bitmap = true;
#endif #endif

View File

@ -17,7 +17,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
@ -323,23 +322,26 @@ void UnmapNow(uintptr_t reservation_start,
// In 32-bit mode, the beginning of a reservation may be excluded from the // In 32-bit mode, the beginning of a reservation may be excluded from the
// BRP pool, so shift the pointer. Other pools don't have this logic. // BRP pool, so shift the pointer. Other pools don't have this logic.
PA_DCHECK(IsManagedByPartitionAllocBRPPool( PA_DCHECK(IsManagedByPartitionAllocBRPPool(
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
reservation_start reservation_start
#else #else
reservation_start + reservation_start +
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
)); ));
} else } else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{ {
PA_DCHECK( PA_DCHECK(pool == kRegularPoolHandle
pool == kRegularPoolHandle
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
|| pool == kPkeyPoolHandle || pool == kPkeyPoolHandle
#endif #endif
|| (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)); #if BUILDFLAG(HAS_64_BIT_POINTERS)
||
(IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)
#endif
);
// Non-BRP pools don't need adjustment that BRP needs in 32-bit mode. // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) || PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
@ -365,7 +367,7 @@ void UnmapNow(uintptr_t reservation_start,
*offset_ptr++ = kOffsetTagNotAllocated; *offset_ptr++ = kOffsetTagNotAllocated;
} }
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start, AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size); reservation_size);
#endif #endif

View File

@ -21,7 +21,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h" #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_bucket.h"
@ -138,7 +137,7 @@ struct SlotSpanMetadata {
PartitionBucket<thread_safe>* const bucket = nullptr; PartitionBucket<thread_safe>* const bucket = nullptr;
// CHECK()ed in AllocNewSlotSpan(). // CHECK()ed in AllocNewSlotSpan().
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE) #if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
// System page size is not a constant on Apple OSes, but is either 4 or 16kiB // System page size is not a constant on Apple OSes, but is either 4 or 16kiB
// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And // (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
// PartitionPageSize() is 4 times the OS page size. // PartitionPageSize() is 4 times the OS page size.
@ -155,7 +154,7 @@ struct SlotSpanMetadata {
// larger, so it doesn't have as many slots. // larger, so it doesn't have as many slots.
static constexpr size_t kMaxSlotsPerSlotSpan = static constexpr size_t kMaxSlotsPerSlotSpan =
PartitionPageSize() / kSmallestBucket; PartitionPageSize() / kSmallestBucket;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE) #endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
// The maximum number of bits needed to cover all currently supported OSes. // The maximum number of bits needed to cover all currently supported OSes.
static constexpr size_t kMaxSlotsPerSlotSpanBits = 13; static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), ""); static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
@ -482,7 +481,8 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
return reinterpret_cast<AllocationStateMap*>( return reinterpret_cast<AllocationStateMap*>(
SuperPageStateBitmapAddr(super_page)); SuperPageStateBitmapAddr(super_page));
} }
#else
#else // BUILDFLAG(USE_STARSCAN)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() { ReservedStateBitmapSize() {

View File

@ -736,13 +736,24 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
// this function on PartitionRoots without a thread cache. // this function on PartitionRoots without a thread cache.
PA_CHECK(!flags.with_thread_cache); PA_CHECK(!flags.with_thread_cache);
auto pool_handle = ChoosePool(); auto pool_handle = ChoosePool();
#if BUILDFLAG(ENABLE_PKEYS)
// The pages managed by pkey will be free-ed at UninitPKeyForTesting().
// Don't invoke FreePages() for the pages.
if (pool_handle == internal::kPkeyPoolHandle) {
return;
}
PA_DCHECK(pool_handle < internal::kNumPools);
#else
PA_DCHECK(pool_handle <= internal::kNumPools);
#endif
auto* curr = first_extent; auto* curr = first_extent;
while (curr != nullptr) { while (curr != nullptr) {
auto* next = curr->next; auto* next = curr->next;
uintptr_t address = SuperPagesBeginFromExtent(curr); uintptr_t address = SuperPagesBeginFromExtent(curr);
size_t size = size_t size =
internal::kSuperPageSize * curr->number_of_consecutive_super_pages; internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address, internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
size); size);
#endif #endif
@ -759,7 +770,7 @@ void PartitionRoot<thread_safe>::EnableMac11MallocSizeHackForTesting() {
} }
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK) #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace { namespace {
std::atomic<bool> g_reserve_brp_guard_region_called; std::atomic<bool> g_reserve_brp_guard_region_called;
// An address constructed by repeating `kQuarantinedByte` shouldn't never point // An address constructed by repeating `kQuarantinedByte` shouldn't never point
@ -795,7 +806,7 @@ void ReserveBackupRefPtrGuardRegionIfNeeded() {
} }
} // namespace } // namespace
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
// !PA_CONFIG(HAS_64_BITS_POINTERS) // !BUILDFLAG(HAS_64_BIT_POINTERS)
template <bool thread_safe> template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) { void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
@ -824,12 +835,12 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// running on the right hardware. // running on the right hardware.
::partition_alloc::internal::InitializeMTESupportIfNeeded(); ::partition_alloc::internal::InitializeMTESupportIfNeeded();
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// Reserve address space for partition alloc. // Reserve address space for partition alloc.
internal::PartitionAddressSpace::Init(); internal::PartitionAddressSpace::Init();
#endif #endif
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
ReserveBackupRefPtrGuardRegionIfNeeded(); ReserveBackupRefPtrGuardRegionIfNeeded();
#endif #endif
@ -1499,6 +1510,73 @@ void PartitionRoot<thread_safe>::DeleteForTesting(
delete partition_root; delete partition_root;
} }
template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetForTesting(bool allow_leaks) {
if (flags.with_thread_cache) {
ThreadCache::SwapForTesting(nullptr);
flags.with_thread_cache = false;
}
::partition_alloc::internal::ScopedGuard guard(lock_);
#if BUILDFLAG(PA_DCHECK_IS_ON)
if (!allow_leaks) {
unsigned num_allocated_slots = 0;
for (Bucket& bucket : buckets) {
if (bucket.active_slot_spans_head !=
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
bucket.active_slot_spans_head;
slot_span; slot_span = slot_span->next_slot_span) {
num_allocated_slots += slot_span->num_allocated_slots;
}
}
// Full slot spans are nowhere. Need to see bucket.num_full_slot_spans
// to count the number of full slot spans' slots.
if (bucket.num_full_slot_spans) {
num_allocated_slots +=
bucket.num_full_slot_spans * bucket.get_slots_per_span();
}
}
PA_DCHECK(num_allocated_slots == 0);
// Check for direct-mapped allocations.
PA_DCHECK(!direct_map_list);
}
#endif
DestructForTesting(); // IN-TEST
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
if (initialized) {
internal::PartitionRootEnumerator::Instance().Unregister(this);
}
#endif // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
for (Bucket& bucket : buckets) {
bucket.active_slot_spans_head =
SlotSpan::get_sentinel_slot_span_non_const();
bucket.empty_slot_spans_head = nullptr;
bucket.decommitted_slot_spans_head = nullptr;
bucket.num_full_slot_spans = 0;
}
next_super_page = 0;
next_partition_page = 0;
next_partition_page_end = 0;
current_extent = nullptr;
first_extent = nullptr;
direct_map_list = nullptr;
for (auto& entity : global_empty_slot_span_ring) {
entity = nullptr;
}
global_empty_slot_span_ring_index = 0;
global_empty_slot_span_ring_size = internal::kDefaultEmptySlotSpanRingSize;
initialized = false;
}
template <bool thread_safe> template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() { void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() {
::partition_alloc::internal::ScopedGuard guard{lock_}; ::partition_alloc::internal::ScopedGuard guard{lock_};

View File

@ -74,7 +74,6 @@
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#endif #endif
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
@ -406,6 +405,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionRoot() PartitionRoot()
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {} : flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); } explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
// TODO(tasak): remove ~PartitionRoot() after confirming all tests
// don't need ~PartitionRoot().
~PartitionRoot(); ~PartitionRoot();
// This will unreserve any space in the pool that the PartitionRoot is // This will unreserve any space in the pool that the PartitionRoot is
@ -585,6 +586,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionStatsDumper* partition_stats_dumper); PartitionStatsDumper* partition_stats_dumper);
static void DeleteForTesting(PartitionRoot* partition_root); static void DeleteForTesting(PartitionRoot* partition_root);
void ResetForTesting(bool allow_leaks);
void ResetBookkeepingForTesting(); void ResetBookkeepingForTesting();
PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const { PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
@ -653,10 +655,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
} }
internal::pool_handle ChoosePool() const { internal::pool_handle ChoosePool() const {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
if (flags.use_configurable_pool) { if (flags.use_configurable_pool) {
PA_DCHECK(IsConfigurablePoolAvailable()); PA_DCHECK(IsConfigurablePoolAvailable());
return internal::kConfigurablePoolHandle; return internal::kConfigurablePoolHandle;
} }
#endif
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
if (flags.pkey != internal::kDefaultPkey) { if (flags.pkey != internal::kDefaultPkey) {
return internal::kPkeyPoolHandle; return internal::kPkeyPoolHandle;
@ -962,13 +966,13 @@ class ScopedSyscallTimer {
PA_ALWAYS_INLINE uintptr_t PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) { PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
PA_DCHECK(IsManagedByPartitionAllocBRPPool(address)); PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// Use this variant of GetDirectMapReservationStart as it has better // Use this variant of GetDirectMapReservationStart as it has better
// performance. // performance.
uintptr_t offset = OffsetInBRPPool(address); uintptr_t offset = OffsetInBRPPool(address);
uintptr_t reservation_start = uintptr_t reservation_start =
GetDirectMapReservationStart(address, kBRPPoolHandle, offset); GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
#else #else // BUILDFLAG(HAS_64_BIT_POINTERS)
uintptr_t reservation_start = GetDirectMapReservationStart(address); uintptr_t reservation_start = GetDirectMapReservationStart(address);
#endif #endif
if (!reservation_start) { if (!reservation_start) {
@ -1396,8 +1400,13 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// potential use-after-free issues into unexploitable crashes. // potential use-after-free issues into unexploitable crashes.
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() && if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
brp_zapping_enabled())) { brp_zapping_enabled())) {
internal::SecureMemset(object, internal::kQuarantinedByte, auto usable_size = slot_span->GetUsableSize(this);
slot_span->GetUsableSize(this)); auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
if (PA_UNLIKELY(hook)) {
hook(object, usable_size);
} else {
internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
}
} }
if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) { if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {

View File

@ -11,12 +11,12 @@
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
#error "pkey support requires 64 bit pointers" #error "pkey support requires 64 bit pointers"
#endif #endif

View File

@ -85,130 +85,90 @@ namespace base {
// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a // NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
// lightweight replacement of a raw pointer, hence performance is critical. // lightweight replacement of a raw pointer, hence performance is critical.
// This is a bitfield representing the different flags that can be applied to a
// raw_ptr.
//
// Internal use only: Developers shouldn't use those values directly.
//
// Housekeeping rules: Try not to change trait values, so that numeric trait
// values stay constant across builds (could be useful e.g. when analyzing stack
// traces). A reasonable exception to this rule are `*ForTest` traits. As a
// matter of fact, we propose that new non-test traits are added before the
// `*ForTest` traits.
enum class RawPtrTraits : unsigned {
kEmpty = 0,
// Disables dangling pointer detection, but keeps other raw_ptr protections.
//
// Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
// instead.
kMayDangle = (1 << 0),
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Disables any protections when MTECheckedPtrImpl is requested, by
// switching to NoOpImpl in that case.
//
// Don't use directly, use DegradeToNoOpWhenMTE instead.
kDisableMTECheckedPtr = (1 << 1),
#else
kDisableMTECheckedPtr = kEmpty,
#endif
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
// Disables any hooks, by switching to NoOpImpl in that case.
//
// Internal use only.
kDisableHooks = (1 << 2),
#else
kDisableHooks = kEmpty,
#endif
// Pointer arithmetic is discouraged and disabled by default.
//
// Don't use directly, use AllowPtrArithmetic instead.
kAllowPtrArithmetic = (1 << 3),
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track
// the number of times the raw_ptr is wrapped, unwrapped, etc.
//
// Test only.
kUseCountingWrapperForTest = (1 << 4),
};
// Used to combine RawPtrTraits:
constexpr RawPtrTraits operator|(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) |
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator&(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) &
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator~(RawPtrTraits a) {
return static_cast<RawPtrTraits>(~static_cast<unsigned>(a));
}
namespace raw_ptr_traits { namespace raw_ptr_traits {
// Disables dangling pointer detection, but keeps other raw_ptr protections. constexpr bool Contains(RawPtrTraits a, RawPtrTraits b) {
// Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged return (a & b) != RawPtrTraits::kEmpty;
// instead. }
struct MayDangle {};
// Disables any protections when MTECheckedPtrImpl is requested, by switching to
// NoOpImpl in that case.
// Don't use directly, use DegradeToNoOpWhenMTE instead.
struct DisableMTECheckedPtr {};
// Disables any hooks, by switching to NoOpImpl in that case.
// Internal use only.
struct DisableHooks {};
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track the
// number of times the raw_ptr is wrapped, unrwapped, etc.
// Test only.
struct UseCountingWrapperForTest {};
// Very internal use only.
using EmptyTrait = void;
template <typename Trait> constexpr RawPtrTraits Remove(RawPtrTraits a, RawPtrTraits b) {
inline constexpr bool IsValidTraitV = return a & ~b;
std::is_same_v<Trait, MayDangle> || }
std::is_same_v<Trait, DisableMTECheckedPtr> ||
std::is_same_v<Trait, DisableHooks> ||
std::is_same_v<Trait, UseCountingWrapperForTest> ||
std::is_same_v<Trait, EmptyTrait>;
template <typename... Traits> constexpr bool AreValid(RawPtrTraits traits) {
struct TraitPack { return Remove(traits, RawPtrTraits::kMayDangle |
static_assert((IsValidTraitV<Traits> && ...), "Unknown raw_ptr trait"); RawPtrTraits::kDisableMTECheckedPtr |
RawPtrTraits::kDisableHooks |
RawPtrTraits::kAllowPtrArithmetic |
RawPtrTraits::kUseCountingWrapperForTest) ==
RawPtrTraits::kEmpty;
}
template <typename TraitToSearch> template <RawPtrTraits Traits>
static inline constexpr bool HasV =
(std::is_same_v<TraitToSearch, Traits> || ...);
};
// Replaces an unwanted trait with EmptyTrait.
template <typename TraitToExclude>
struct ExcludeTrait {
template <typename Trait>
using Filter = std::
conditional_t<std::is_same_v<TraitToExclude, Trait>, EmptyTrait, Trait>;
};
// Use TraitBundle alias, instead of TraitBundleInt, so that traits in different
// order and duplicates resolve to the same underlying type. For example,
// TraitBundle<A,B> is the same C++ type as TraitBundle<B,A,B,A>. This also
// allows to entirely ignore a trait under some build configurations, to prevent
// it from turning TraitBundle into a different C++ type.
//
// It'd be easier to just pass bools into TraitBundleInt, instead of echo'ing
// the trait, but that would lead to less readable compiler messages that spit
// out the type. TraitBundleInt<MayDangle,EmptyTrait,DisableHooks,EmptyTrait> is
// more readable than TraitBundleInt<true,false,true,false>.
template <typename... Traits>
struct TraitBundleInt;
template <typename... Traits>
using TraitBundle = TraitBundleInt<
std::conditional_t<TraitPack<Traits...>::template HasV<MayDangle>,
MayDangle,
EmptyTrait>,
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
std::conditional_t<
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>,
DisableMTECheckedPtr,
EmptyTrait>,
#else
// Entirely ignore DisableMTECheckedPtr on non-MTECheckedPtr builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<TraitPack<Traits...>::template HasV<DisableHooks>,
DisableHooks,
EmptyTrait>,
#else
// Entirely ignore DisableHooks on non-ASanBRP builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>,
UseCountingWrapperForTest,
EmptyTrait>>;
template <typename... Traits>
struct TraitBundleInt {
static constexpr bool kMayDangle =
TraitPack<Traits...>::template HasV<MayDangle>;
static constexpr bool kDisableMTECheckedPtr =
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>;
static constexpr bool kDisableHooks =
TraitPack<Traits...>::template HasV<DisableHooks>;
static constexpr bool kUseCountingWrapperForTest =
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>;
// Assert that on certain build configurations, the related traits are not
// even used. If they were, they'd result in a different C++ type, and would
// trigger more costly cross-type raw_ptr/raw_ref conversions.
#if !PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
static_assert(!kDisableMTECheckedPtr);
#endif
#if !BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
static_assert(!kDisableHooks);
#endif
// Use TraitBundle, instead of TraitBundleInt, to re-normalize trait list
// (i.e. order canonically and remove duplicates).
template <typename TraitToAdd>
using AddTraitT = TraitBundle<Traits..., TraitToAdd>;
// Unlike AddTraitT, no need to re-normalize because ExcludeTrait preserves
// the trait list structure.
template <typename TraitToRemove>
using RemoveTraitT = TraitBundleInt<
typename ExcludeTrait<TraitToRemove>::template Filter<Traits>...>;
};
template <typename TraitBundle>
struct TraitsToImpl; struct TraitsToImpl;
} // namespace raw_ptr_traits } // namespace raw_ptr_traits
@ -346,17 +306,33 @@ struct MTECheckedPtrImpl {
// Wraps a pointer, and returns its uintptr_t representation. // Wraps a pointer, and returns its uintptr_t representation.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
// Catch the obviously unsupported cases, e.g. `nullptr` or `-1ull`.
//
// `ExtractPtr(ptr)` should be functionally identical to `ptr` for
// the purposes of `EnabledForPtr()`, since we assert that `ptr` is
// an untagged raw pointer (there are no tag bits provided by
// MTECheckedPtr to strip off). However, something like `-1ull`
// looks identical to a fully tagged-up pointer. We'll add a check
// here just to make sure there's no difference in the support check
// whether extracted or not.
const bool extracted_supported =
PartitionAllocSupport::EnabledForPtr(ExtractPtr(ptr));
const bool raw_supported = PartitionAllocSupport::EnabledForPtr(ptr);
PA_BASE_DCHECK(extracted_supported == raw_supported);
// At the expense of consistency, we use the `raw_supported`
// condition. When wrapping a raw pointer, we assert that having set
// bits conflatable with the MTECheckedPtr tag disqualifies `ptr`
// from support.
if (!raw_supported) {
return ptr;
}
// Disambiguation: UntagPtr removes the hardware MTE tag, whereas this // Disambiguation: UntagPtr removes the hardware MTE tag, whereas this
// function is responsible for adding the software MTE tag. // function is responsible for adding the software MTE tag.
uintptr_t addr = partition_alloc::UntagPtr(ptr); uintptr_t addr = partition_alloc::UntagPtr(ptr);
PA_BASE_DCHECK(ExtractTag(addr) == 0ull); PA_BASE_DCHECK(ExtractTag(addr) == 0ull);
// Return a not-wrapped |addr|, if it's either nullptr or if the protection
// for this pointer is disabled.
if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
return ptr;
}
// Read the tag and place it in the top bits of the address. // Read the tag and place it in the top bits of the address.
// Even if PartitionAlloc's tag has less than kTagBits, we'll read // Even if PartitionAlloc's tag has less than kTagBits, we'll read
// what's given and pad the rest with 0s. // what's given and pad the rest with 0s.
@ -407,19 +383,30 @@ struct MTECheckedPtrImpl {
return wrapped_ptr; return wrapped_ptr;
} }
// Unwraps the pointer's uintptr_t representation, while asserting that memory // Unwraps the pointer as a T*, without making an assertion on whether
// hasn't been freed. The function must handle nullptr gracefully. // memory was freed or not.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
// SafelyUnwrapPtrForDereference handles nullptr case well. // Return `wrapped_ptr` straightaway if protection is disabled, e.g.
return SafelyUnwrapPtrForDereference(wrapped_ptr); // when `ptr` is `nullptr` or `uintptr_t{-1ull}`.
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
} }
// Unwraps the pointer's uintptr_t representation, without making an assertion // Unwraps the pointer's uintptr_t representation, without making an assertion
// on whether memory was freed or not. // on whether memory was freed or not.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
return ExtractPtr(wrapped_ptr); // Return `wrapped_ptr` straightaway if protection is disabled, e.g.
// when `ptr` is `nullptr` or `uintptr_t{-1ull}`.
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
} }
// Upcasts the wrapped pointer. // Upcasts the wrapped pointer.
@ -522,10 +509,12 @@ struct MTECheckedPtrImpl {
// wrapped, unrwapped, etc. // wrapped, unrwapped, etc.
// //
// Test only. // Test only.
template <typename Traits> template <RawPtrTraits Traits>
struct RawPtrCountingImplWrapperForTest struct RawPtrCountingImplWrapperForTest
: public raw_ptr_traits::TraitsToImpl<Traits>::Impl { : public raw_ptr_traits::TraitsToImpl<Traits>::Impl {
static_assert(!Traits::kUseCountingWrapperForTest); static_assert(
!raw_ptr_traits::Contains(Traits,
RawPtrTraits::kUseCountingWrapperForTest));
using SuperImpl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl; using SuperImpl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -685,31 +674,36 @@ struct IsSupportedType<T,
#undef PA_WINDOWS_HANDLE_TYPE #undef PA_WINDOWS_HANDLE_TYPE
#endif #endif
template <typename Traits> template <RawPtrTraits Traits>
struct TraitsToImpl { struct TraitsToImpl {
static_assert(AreValid(Traits), "Unknown raw_ptr trait(s)");
private: private:
// UnderlyingImpl is the struct that provides the implementation of the // UnderlyingImpl is the struct that provides the implementation of the
// protections related to raw_ptr. // protections related to raw_ptr.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
using UnderlyingImpl = internal::RawPtrBackupRefImpl< using UnderlyingImpl = internal::RawPtrBackupRefImpl<
/*AllowDangling=*/Traits::kMayDangle>; /*allow_dangling=*/Contains(Traits, RawPtrTraits::kMayDangle)>;
#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR) #elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
using UnderlyingImpl = using UnderlyingImpl = std::conditional_t<
std::conditional_t<Traits::kMayDangle, Contains(Traits, RawPtrTraits::kMayDangle),
// No special bookkeeping required for this case, // No special bookkeeping required for this case,
// just treat these as ordinary pointers. // just treat these as ordinary pointers.
internal::RawPtrNoOpImpl, internal::RawPtrNoOpImpl,
internal::RawPtrAsanUnownedImpl>; internal::RawPtrAsanUnownedImpl<
Contains(Traits, RawPtrTraits::kAllowPtrArithmetic)>>;
#elif PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #elif PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
using UnderlyingImpl = using UnderlyingImpl =
std::conditional_t<Traits::kDisableMTECheckedPtr, std::conditional_t<Contains(Traits, RawPtrTraits::kDisableMTECheckedPtr),
internal::RawPtrNoOpImpl, internal::RawPtrNoOpImpl,
internal::MTECheckedPtrImpl< internal::MTECheckedPtrImpl<
internal::MTECheckedPtrImplPartitionAllocSupport>>; internal::MTECheckedPtrImplPartitionAllocSupport>>;
#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR) #elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
using UnderlyingImpl = std::conditional_t<Traits::kDisableHooks, using UnderlyingImpl =
internal::RawPtrNoOpImpl, std::conditional_t<Contains(Traits, RawPtrTraits::kDisableHooks),
internal::RawPtrHookableImpl>; internal::RawPtrNoOpImpl,
internal::RawPtrHookableImpl>;
#else #else
using UnderlyingImpl = internal::RawPtrNoOpImpl; using UnderlyingImpl = internal::RawPtrNoOpImpl;
#endif #endif
@ -720,9 +714,9 @@ struct TraitsToImpl {
// Impl may be different from UnderlyingImpl, because it may include a // Impl may be different from UnderlyingImpl, because it may include a
// wrapper. // wrapper.
using Impl = std::conditional_t< using Impl = std::conditional_t<
Traits::kUseCountingWrapperForTest, Contains(Traits, RawPtrTraits::kUseCountingWrapperForTest),
internal::RawPtrCountingImplWrapperForTest< internal::RawPtrCountingImplWrapperForTest<
typename Traits::template RemoveTraitT<UseCountingWrapperForTest>>, Remove(Traits, RawPtrTraits::kUseCountingWrapperForTest)>,
UnderlyingImpl>; UnderlyingImpl>;
}; };
@ -754,13 +748,11 @@ struct TraitsToImpl {
// non-default move constructor/assignment. Thus, it's possible to get an error // non-default move constructor/assignment. Thus, it's possible to get an error
// where the pointer is not actually dangling, and have to work around the // where the pointer is not actually dangling, and have to work around the
// compiler. We have not managed to construct such an example in Chromium yet. // compiler. We have not managed to construct such an example in Chromium yet.
template <typename T, typename Traits = raw_ptr_traits::TraitBundle<>> template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr { class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// Type to return from ExtractAsDangling(), which is identical except // Type to return from ExtractAsDangling(), which is identical except
// MayDangle trait is added (if one isn't there already). // kMayDangle trait is added (if one isn't there already).
using DanglingRawPtrType = using DanglingRawPtrType = raw_ptr<T, Traits | RawPtrTraits::kMayDangle>;
raw_ptr<T,
typename Traits::template AddTraitT<raw_ptr_traits::MayDangle>>;
public: public:
using Impl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl; using Impl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -842,17 +834,15 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
template < template <RawPtrTraits PassedTraits,
typename PassedTraits, typename Unused = std::enable_if_t<Traits != PassedTraits>>
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
PA_ALWAYS_INLINE explicit raw_ptr(const raw_ptr<T, PassedTraits>& p) noexcept PA_ALWAYS_INLINE explicit raw_ptr(const raw_ptr<T, PassedTraits>& p) noexcept
: wrapped_ptr_(Impl::WrapRawPtrForDuplication( : wrapped_ptr_(Impl::WrapRawPtrForDuplication(
raw_ptr_traits::TraitsToImpl<PassedTraits>::Impl:: raw_ptr_traits::TraitsToImpl<PassedTraits>::Impl::
UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {} UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {}
template < template <RawPtrTraits PassedTraits,
typename PassedTraits, typename Unused = std::enable_if_t<Traits != PassedTraits>>
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
PA_ALWAYS_INLINE raw_ptr& operator=( PA_ALWAYS_INLINE raw_ptr& operator=(
const raw_ptr<T, PassedTraits>& p) noexcept { const raw_ptr<T, PassedTraits>& p) noexcept {
Impl::ReleaseWrappedPtr(wrapped_ptr_); Impl::ReleaseWrappedPtr(wrapped_ptr_);
@ -1004,20 +994,23 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
return *this += -delta_elems; return *this += -delta_elems;
} }
template < // Do not disable operator+() and operator-().
typename Z, // They provide OOB checks. Keep them enabled, which may be blocked later when
typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>> // attempting to apply the += or -= operation, when disabled. In the absence
// of operators +/-, the compiler is free to implicitly convert to the
// underlying T* representation and perform ordinary pointer arithmetic, thus
// invalidating the purpose behind disabling them.
template <typename Z>
friend PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p, Z delta_elems) { friend PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p; raw_ptr result = p;
return result += delta_elems; return result += delta_elems;
} }
template < template <typename Z>
typename Z,
typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>>
friend PA_ALWAYS_INLINE raw_ptr operator-(const raw_ptr& p, Z delta_elems) { friend PA_ALWAYS_INLINE raw_ptr operator-(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p; raw_ptr result = p;
return result -= delta_elems; return result -= delta_elems;
} }
friend PA_ALWAYS_INLINE ptrdiff_t operator-(const raw_ptr& p1, friend PA_ALWAYS_INLINE ptrdiff_t operator-(const raw_ptr& p1,
const raw_ptr& p2) { const raw_ptr& p2) {
return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_); return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_);
@ -1086,22 +1079,22 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work, // `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work,
// because a comparison operator defined inline would not be allowed to call // because a comparison operator defined inline would not be allowed to call
// `raw_ptr<U>`'s private `GetForComparison()` method. // `raw_ptr<U>`'s private `GetForComparison()` method.
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2> template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
@ -1211,41 +1204,41 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// #union, #global-scope, #constexpr-ctor-field-initializer // #union, #global-scope, #constexpr-ctor-field-initializer
RAW_PTR_EXCLUSION T* wrapped_ptr_; RAW_PTR_EXCLUSION T* wrapped_ptr_;
template <typename U, typename R> template <typename U, base::RawPtrTraits R>
friend class raw_ptr; friend class raw_ptr;
}; };
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() == rhs.GetForComparison(); return lhs.GetForComparison() == rhs.GetForComparison();
} }
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return !(lhs == rhs); return !(lhs == rhs);
} }
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() < rhs.GetForComparison(); return lhs.GetForComparison() < rhs.GetForComparison();
} }
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() > rhs.GetForComparison(); return lhs.GetForComparison() > rhs.GetForComparison();
} }
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() <= rhs.GetForComparison(); return lhs.GetForComparison() <= rhs.GetForComparison();
} }
template <typename U, typename V, typename Traits1, typename Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() >= rhs.GetForComparison(); return lhs.GetForComparison() >= rhs.GetForComparison();
@ -1254,7 +1247,7 @@ PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
template <typename T> template <typename T>
struct IsRawPtr : std::false_type {}; struct IsRawPtr : std::false_type {};
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {}; struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -1263,9 +1256,9 @@ inline constexpr bool IsRawPtrV = IsRawPtr<T>::value;
template <typename T> template <typename T>
inline constexpr bool IsRawPtrMayDangleV = false; inline constexpr bool IsRawPtrMayDangleV = false;
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> = inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
Traits::kMayDangle; raw_ptr_traits::Contains(Traits, RawPtrTraits::kMayDangle);
// Template helpers for working with T* or raw_ptr<T>. // Template helpers for working with T* or raw_ptr<T>.
template <typename T> template <typename T>
@ -1274,7 +1267,7 @@ struct IsPointer : std::false_type {};
template <typename T> template <typename T>
struct IsPointer<T*> : std::true_type {}; struct IsPointer<T*> : std::true_type {};
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
struct IsPointer<raw_ptr<T, Traits>> : std::true_type {}; struct IsPointer<raw_ptr<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -1290,7 +1283,7 @@ struct RemovePointer<T*> {
using type = T; using type = T;
}; };
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
struct RemovePointer<raw_ptr<T, Traits>> { struct RemovePointer<raw_ptr<T, Traits>> {
using type = T; using type = T;
}; };
@ -1311,23 +1304,19 @@ using base::raw_ptr;
// //
// When using it, please provide a justification about what guarantees that it // When using it, please provide a justification about what guarantees that it
// will never be dereferenced after becoming dangling. // will never be dereferenced after becoming dangling.
using DisableDanglingPtrDetection = constexpr auto DisableDanglingPtrDetection = base::RawPtrTraits::kMayDangle;
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
// See `docs/dangling_ptr.md` // See `docs/dangling_ptr.md`
// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the // Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
// occurrences are meant to be removed. See https://crbug.com/1291138. // occurrences are meant to be removed. See https://crbug.com/1291138.
using DanglingUntriaged = constexpr auto DanglingUntriaged = base::RawPtrTraits::kMayDangle;
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
// This type is to be used in callbacks arguments when it is known that they // This type is to be used in callbacks arguments when it is known that they
// might receive dangling pointers. In any other cases, please use one of: // might receive dangling pointers. In any other cases, please use one of:
// - raw_ptr<T, DanglingUntriaged> // - raw_ptr<T, DanglingUntriaged>
// - raw_ptr<T, DisableDanglingPtrDetection> // - raw_ptr<T, DisableDanglingPtrDetection>
template <typename T> template <typename T, base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty>
using MayBeDangling = base::raw_ptr< using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>;
T,
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>>;
// The following template parameters are only meaningful when `raw_ptr` // The following template parameters are only meaningful when `raw_ptr`
// is `MTECheckedPtr` (never the case unless a particular GN arg is set // is `MTECheckedPtr` (never the case unless a particular GN arg is set
@ -1342,25 +1331,18 @@ using MayBeDangling = base::raw_ptr<
// See `base/memory/raw_ptr_mtecheckedptr.md` // See `base/memory/raw_ptr_mtecheckedptr.md`
// Direct pass-through to no-op implementation. // Direct pass-through to no-op implementation.
using DegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle< constexpr auto DegradeToNoOpWhenMTE = base::RawPtrTraits::kDisableMTECheckedPtr;
base::raw_ptr_traits::DisableMTECheckedPtr>;
// As above, but with the "untriaged dangling" annotation. // The use of pointer arithmetic with raw_ptr is strongly discouraged and
using DanglingUntriagedDegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle< // disabled by default. Usually a container like span<> should be used
base::raw_ptr_traits::MayDangle, // instead of the raw_ptr.
base::raw_ptr_traits::DisableMTECheckedPtr>; constexpr auto AllowPtrArithmetic = base::RawPtrTraits::kAllowPtrArithmetic;
// As above, but with the "explicitly disable protection" annotation.
using DisableDanglingPtrDetectionDegradeToNoOpWhenMTE =
base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::MayDangle,
base::raw_ptr_traits::DisableMTECheckedPtr>;
namespace std { namespace std {
// Override so set/map lookups do not create extra raw_ptr. This also allows // Override so set/map lookups do not create extra raw_ptr. This also allows
// dangling pointers to be used for lookup. // dangling pointers to be used for lookup.
template <typename T, typename Traits> template <typename T, base::RawPtrTraits Traits>
struct less<raw_ptr<T, Traits>> { struct less<raw_ptr<T, Traits>> {
using Impl = typename raw_ptr<T, Traits>::Impl; using Impl = typename raw_ptr<T, Traits>::Impl;
using is_transparent = void; using is_transparent = void;
@ -1385,7 +1367,7 @@ struct less<raw_ptr<T, Traits>> {
// Define for cases where raw_ptr<T> holds a pointer to an array of type T. // Define for cases where raw_ptr<T> holds a pointer to an array of type T.
// This is consistent with definition of std::iterator_traits<T*>. // This is consistent with definition of std::iterator_traits<T*>.
// Algorithms like std::binary_search need that. // Algorithms like std::binary_search need that.
template <typename T, typename Traits> template <typename T, base::RawPtrTraits Traits>
struct iterator_traits<raw_ptr<T, Traits>> { struct iterator_traits<raw_ptr<T, Traits>> {
using difference_type = ptrdiff_t; using difference_type = ptrdiff_t;
using value_type = std::remove_cv_t<T>; using value_type = std::remove_cv_t<T>;
@ -1394,6 +1376,33 @@ struct iterator_traits<raw_ptr<T, Traits>> {
using iterator_category = std::random_access_iterator_tag; using iterator_category = std::random_access_iterator_tag;
}; };
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. The latter is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing the pointer_traits is the standard blessed way to customize
// `std::to_address(pointer)` in C++20 [3].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ptr<T, Traits>> {
using pointer = ::raw_ptr<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ptr<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(&r);
}
static constexpr element_type* to_address(pointer p) noexcept {
return p.get();
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std } // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_

View File

@ -12,13 +12,34 @@
namespace base::internal { namespace base::internal {
PA_NO_SANITIZE("address") PA_NO_SANITIZE("address")
bool RawPtrAsanUnownedImpl::EndOfAliveAllocation(const volatile void* ptr) { bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr) {
uintptr_t address = reinterpret_cast<uintptr_t>(ptr); uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1) &&
// Normally, we probe the first byte of an object, but in cases of pointer
// arithmetic, we may be probing subsequent bytes, including the legal
// "end + 1" position.
//
// Alas, ASAN will claim an unmapped page is unpoisoned, so willfully ignore
// the fist address of a page, since "end + 1" of an object allocated exactly
// up to a page boundary will SEGV on probe. This will cause false negatives
// for pointers that happen to be page aligned, which is undesirable but
// necessary for now.
//
// We minimize the consequences by using the pointer arithmetic flag in
// higher levels to conditionalize this suppression.
//
// TODO(tsepez): this may still fail for a non-accessible but non-null
// return from, say, malloc(0) which happens to be page-aligned.
//
// TODO(tsepez): enforce the pointer arithmetic flag. Until then, we
// may fail here if a pointer requires the flag but is lacking it.
return is_adjustable_ptr &&
((address & 0x0fff) == 0 ||
__asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) &&
!__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1); !__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
} }
bool RawPtrAsanUnownedImpl::LikelySmuggledScalar(const volatile void* ptr) { bool LikelySmuggledScalar(const volatile void* ptr) {
intptr_t address = reinterpret_cast<intptr_t>(ptr); intptr_t address = reinterpret_cast<intptr_t>(ptr);
return address < 0x4000; // Negative or small positive. return address < 0x4000; // Negative or small positive.
} }

View File

@ -19,6 +19,10 @@
namespace base::internal { namespace base::internal {
bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr);
bool LikelySmuggledScalar(const volatile void* ptr);
template <bool IsAdjustablePtr>
struct RawPtrAsanUnownedImpl { struct RawPtrAsanUnownedImpl {
// Wraps a pointer. // Wraps a pointer.
template <typename T> template <typename T>
@ -91,14 +95,11 @@ struct RawPtrAsanUnownedImpl {
template <typename T> template <typename T>
static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) { static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) {
if (wrapped_ptr && !LikelySmuggledScalar(wrapped_ptr) && if (wrapped_ptr && !LikelySmuggledScalar(wrapped_ptr) &&
!EndOfAliveAllocation(wrapped_ptr)) { !EndOfAliveAllocation(wrapped_ptr, IsAdjustablePtr)) {
reinterpret_cast<const volatile uint8_t*>(wrapped_ptr)[0]; reinterpret_cast<const volatile uint8_t*>(wrapped_ptr)[0];
} }
} }
static bool EndOfAliveAllocation(const volatile void* ptr);
static bool LikelySmuggledScalar(const volatile void* ptr);
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor. // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T> template <typename T>

View File

@ -13,6 +13,7 @@
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -133,7 +134,7 @@ struct RawPtrBackupRefImpl {
#endif #endif
AcquireInternal(address); AcquireInternal(address);
} else { } else {
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_HAS_BUILTIN(__builtin_constant_p) #if PA_HAS_BUILTIN(__builtin_constant_p)
// Similarly to `IsSupportedAndNotNull` above, elide the // Similarly to `IsSupportedAndNotNull` above, elide the
// `BanSuperPageFromBRPPool` call if the compiler can prove that `address` // `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
@ -148,7 +149,7 @@ struct RawPtrBackupRefImpl {
partition_alloc::internal::AddressPoolManagerBitmap:: partition_alloc::internal::AddressPoolManagerBitmap::
BanSuperPageFromBRPPool(address); BanSuperPageFromBRPPool(address);
} }
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
} }
return ptr; return ptr;

View File

@ -0,0 +1,76 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
// Struct intended to be used with designated initializers and passed
// to the `CountersMatch()` matcher.
//
// `CountingImplType` isn't used directly; it tells the `CountersMatch`
// matcher which impl's static members should be checked.
template <typename CountingImplType>
struct CountingRawPtrExpectations {
absl::optional<int> wrap_raw_ptr_cnt;
absl::optional<int> release_wrapped_ptr_cnt;
absl::optional<int> get_for_dereference_cnt;
absl::optional<int> get_for_extraction_cnt;
absl::optional<int> get_for_comparison_cnt;
absl::optional<int> wrapped_ptr_swap_cnt;
absl::optional<int> wrapped_ptr_less_cnt;
absl::optional<int> pointer_to_member_operator_cnt;
absl::optional<int> wrap_raw_ptr_for_dup_cnt;
absl::optional<int> get_for_duplication_cnt;
};
#define REPORT_UNEQUAL_RAW_PTR_COUNTER(member_name, CounterClassImpl) \
{ \
if (arg.member_name.has_value() && \
arg.member_name.value() != CounterClassImpl::member_name) { \
*result_listener << "Expected `" #member_name "` to be " \
<< arg.member_name.value() << " but got " \
<< CounterClassImpl::member_name << "; "; \
result = false; \
} \
}
#define REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CounterClassImpl) \
{ \
result = true; \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(release_wrapped_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_dereference_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_extraction_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_comparison_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_swap_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_less_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(pointer_to_member_operator_cnt, \
CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_for_dup_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_duplication_cnt, CounterClassImpl) \
}
// Matcher used with `CountingRawPtr`. Provides slightly shorter
// boilerplate for verifying counts. This inner function is detached
// from the `MATCHER` to isolate the templating.
template <typename CountingImplType>
bool CountersMatchImpl(const CountingRawPtrExpectations<CountingImplType>& arg,
testing::MatchResultListener* result_listener) {
bool result = true;
REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CountingImplType);
return result;
}
// Implicit `arg` has type `CountingRawPtrExpectations`, specialized for
// the specific counting impl.
MATCHER(CountersMatch, "counting impl has specified counters") {
return CountersMatchImpl(arg, result_listener);
}
#undef REPORT_UNEQUAL_RAW_PTR_COUNTERS
#undef REPORT_UNEQUAL_RAW_PTR_COUNTER
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_

View File

@ -17,7 +17,7 @@
namespace base { namespace base {
template <class T, typename Traits> template <class T, RawPtrTraits Traits>
class raw_ref; class raw_ref;
namespace internal { namespace internal {
@ -25,7 +25,7 @@ namespace internal {
template <class T> template <class T>
struct is_raw_ref : std::false_type {}; struct is_raw_ref : std::false_type {};
template <class T, typename Traits> template <class T, RawPtrTraits Traits>
struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {}; struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {};
template <class T> template <class T>
@ -53,7 +53,7 @@ constexpr inline bool is_raw_ref_v = is_raw_ref<T>::value;
// Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed // Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed
// independent of the underlying `T`, similar to `std::reference_wrapper`. That // independent of the underlying `T`, similar to `std::reference_wrapper`. That
// means the reference inside it can be moved and reassigned. // means the reference inside it can be moved and reassigned.
template <class T, typename Traits = raw_ptr_traits::TraitBundle<>> template <class T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref { class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// operator* is used with the expectation of GetForExtraction semantics: // operator* is used with the expectation of GetForExtraction semantics:
// //
@ -63,9 +63,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// The implementation of operator* provides GetForDereference semantics, and // The implementation of operator* provides GetForDereference semantics, and
// this results in spurious crashes in BRP-ASan builds, so we need to disable // this results in spurious crashes in BRP-ASan builds, so we need to disable
// hooks that provide BRP-ASan instrumentation for raw_ref. // hooks that provide BRP-ASan instrumentation for raw_ref.
using Inner = raw_ptr< using Inner = raw_ptr<T, Traits | RawPtrTraits::kDisableHooks>;
T,
typename Traits::template AddTraitT<raw_ptr_traits::DisableHooks>>;
public: public:
using Impl = typename Inner::Impl; using Impl = typename Inner::Impl;
@ -81,7 +79,8 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
internal::MTECheckedPtrImplPartitionAllocSupport>> || internal::MTECheckedPtrImplPartitionAllocSupport>> ||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR) #if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl> || std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR) #endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrNoOpImpl>; std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
@ -98,24 +97,24 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
raw_ref& operator=(const T&& p) = delete; raw_ref& operator=(const T&& p) = delete;
PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) { PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
} }
PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) { PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
} }
} }
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
@ -128,14 +127,14 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
: inner_(p.inner_) { : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
} }
// Deliberately implicit in order to support implicit upcast. // Deliberately implicit in order to support implicit upcast.
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
: inner_(std::move(p.inner_)) { : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
} }
@ -149,13 +148,13 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// Upcast assignment // Upcast assignment
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
@ -164,7 +163,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
PA_ALWAYS_INLINE T& operator*() const { PA_ALWAYS_INLINE T& operator*() const {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator*(); return inner_.operator*();
} }
@ -173,12 +172,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// used in place of operator*() when the memory referred to by the reference // used in place of operator*() when the memory referred to by the reference
// is not immediately going to be accessed. // is not immediately going to be accessed.
PA_ALWAYS_INLINE T& get() const { PA_ALWAYS_INLINE T& get() const {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return *inner_.get(); return *inner_.get();
} }
PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL { PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator->(); return inner_.operator->();
} }
@ -191,123 +190,142 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept { friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
swap(lhs.inner_, rhs.inner_); swap(lhs.inner_, rhs.inner_);
} }
template <class U> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<U, Traits>& rhs) { const raw_ref<V, Traits2>& rhs);
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
return lhs.inner_ == rhs.inner_; const raw_ref<V, Traits2>& rhs);
} template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
template <class U> friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const raw_ref<V, Traits2>& rhs);
const raw_ref<U, Traits>& rhs) { template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. const raw_ref<V, Traits2>& rhs);
return lhs.inner_ != rhs.inner_; template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
} friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
template <class U> const raw_ref<V, Traits2>& rhs);
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
const raw_ref<U, Traits>& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. const raw_ref<V, Traits2>& rhs);
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ == &rhs; return lhs.inner_ == &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ != &rhs; return lhs.inner_ != &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ < &rhs; return lhs.inner_ < &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ > &rhs; return lhs.inner_ > &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ <= &rhs; return lhs.inner_ <= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ >= &rhs; return lhs.inner_ >= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs == rhs.inner_; return &lhs == rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs != rhs.inner_; return &lhs != rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs < rhs.inner_; return &lhs < rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs > rhs.inner_; return &lhs > rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs <= rhs.inner_; return &lhs <= rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs >= rhs.inner_; return &lhs >= rhs.inner_;
} }
private: private:
template <class U, typename R> template <class U, RawPtrTraits R>
friend class raw_ref; friend class raw_ref;
Inner inner_; Inner inner_;
}; };
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ == rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ != rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
// CTAD deduction guide. // CTAD deduction guide.
template <class T> template <class T>
raw_ref(T&) -> raw_ref<T>; raw_ref(T&) -> raw_ref<T>;
@ -318,7 +336,7 @@ raw_ref(const T&) -> raw_ref<const T>;
template <typename T> template <typename T>
struct IsRawRef : std::false_type {}; struct IsRawRef : std::false_type {};
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
struct IsRawRef<raw_ref<T, Traits>> : std::true_type {}; struct IsRawRef<raw_ref<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -329,7 +347,7 @@ struct RemoveRawRef {
using type = T; using type = T;
}; };
template <typename T, typename Traits> template <typename T, RawPtrTraits Traits>
struct RemoveRawRef<raw_ref<T, Traits>> { struct RemoveRawRef<raw_ref<T, Traits>> {
using type = T; using type = T;
}; };
@ -345,7 +363,7 @@ namespace std {
// Override so set/map lookups do not create extra raw_ref. This also // Override so set/map lookups do not create extra raw_ref. This also
// allows C++ references to be used for lookup. // allows C++ references to be used for lookup.
template <typename T, typename Traits> template <typename T, base::RawPtrTraits Traits>
struct less<raw_ref<T, Traits>> { struct less<raw_ref<T, Traits>> {
using Impl = typename raw_ref<T, Traits>::Impl; using Impl = typename raw_ref<T, Traits>::Impl;
using is_transparent = void; using is_transparent = void;
@ -367,6 +385,37 @@ struct less<raw_ref<T, Traits>> {
} }
}; };
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. The latter is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing the pointer_traits is the standard blessed way to customize
// `std::to_address(pointer)` in C++20 [3].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ref<T, Traits>> {
using pointer = ::raw_ref<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ref<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(r);
}
static constexpr element_type* to_address(pointer p) noexcept {
// `raw_ref::get` is used instead of raw_ref::operator*`. It provides
// GetForExtraction rather rather than GetForDereference semantics (see
// raw_ptr.h). This should be used when we we don't know the memory will be
// accessed.
return &(p.get());
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std } // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_

View File

@ -4,14 +4,16 @@
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
ReservationOffsetTable::_PaddedReservationOffsetTables ReservationOffsetTable::_PaddedReservationOffsetTables
ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN; ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else #else
ReservationOffsetTable::_ReservationOffsetTable ReservationOffsetTable::_ReservationOffsetTable
ReservationOffsetTable::reservation_offset_table_; ReservationOffsetTable::reservation_offset_table_;
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -17,7 +17,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h" #include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -67,7 +66,7 @@ static constexpr uint16_t kOffsetTagNormalBuckets =
// granularity is kSuperPageSize. // granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable { class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
public: public:
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// There is one reservation offset table per Pool in 64-bit mode. // There is one reservation offset table per Pool in 64-bit mode.
static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize; static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
static constexpr size_t kReservationOffsetTableLength = static constexpr size_t kReservationOffsetTableLength =
@ -78,7 +77,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull; static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
static constexpr size_t kReservationOffsetTableLength = static constexpr size_t kReservationOffsetTableLength =
4 * kGiB / kSuperPageSize; 4 * kGiB / kSuperPageSize;
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets, static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets."); "Offsets should be smaller than kOffsetTagNormalBuckets.");
@ -95,7 +94,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
offset = kOffsetTagNotAllocated; offset = kOffsetTagNotAllocated;
} }
}; };
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// If pkey support is enabled, we need to pkey-tag the tables of the pkey // If pkey support is enabled, we need to pkey-tag the tables of the pkey
// pool. For this, we need to pad the tables so that the pkey ones start on a // pool. For this, we need to pad the tables so that the pkey ones start on a
// page boundary. // page boundary.
@ -109,12 +108,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
#else #else
// A single table for the entire 32-bit address space. // A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_; static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
}; };
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) { PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools); PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
return ReservationOffsetTable::padded_reservation_offset_tables_ return ReservationOffsetTable::padded_reservation_offset_tables_
.tables[handle - 1] .tables[handle - 1]
.offsets; .offsets;
@ -144,7 +143,7 @@ PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
ReservationOffsetTable::kReservationOffsetTableLength); ReservationOffsetTable::kReservationOffsetTableLength);
return GetReservationOffsetTable(pool) + table_index; return GetReservationOffsetTable(pool) + table_index;
} }
#else #else // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) { PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
return ReservationOffsetTable::reservation_offset_table_.offsets; return ReservationOffsetTable::reservation_offset_table_.offsets;
} }
@ -154,10 +153,10 @@ PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
return ReservationOffsetTable::reservation_offset_table_.offsets + return ReservationOffsetTable::reservation_offset_table_.offsets +
ReservationOffsetTable::kReservationOffsetTableLength; ReservationOffsetTable::kReservationOffsetTableLength;
} }
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) { PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, find the owning Pool and compute the offset from its base. // In 64-bit mode, find the owning Pool and compute the offset from its base.
auto [pool, offset] = GetPoolAndOffset(address); auto [pool, offset] = GetPoolAndOffset(address);
return ReservationOffsetPointer(pool, offset); return ReservationOffsetPointer(pool, offset);
@ -200,13 +199,13 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON) #if BUILDFLAG(PA_DCHECK_IS_ON)
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
// inside another macro (PA_DCHECK). // inside another macro (PA_DCHECK).
#if !PA_CONFIG(HAS_64_BITS_POINTERS) #if !BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr size_t kBRPOffset = constexpr size_t kBRPOffset =
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap; AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else #else
constexpr size_t kBRPOffset = 0ull; constexpr size_t kBRPOffset = 0ull;
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) #endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
// Make sure the reservation start is in the same pool as |address|. // Make sure the reservation start is in the same pool as |address|.
// In the 32-bit mode, the beginning of a reservation may be excluded // In the 32-bit mode, the beginning of a reservation may be excluded
// from the BRP pool, so shift the pointer. The other pools don't have // from the BRP pool, so shift the pointer. The other pools don't have
@ -227,7 +226,7 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
return reservation_start; return reservation_start;
} }
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the given address doesn't point to direct-map allocated memory, // If the given address doesn't point to direct-map allocated memory,
// returns 0. // returns 0.
// This variant has better performance than the regular one on 64-bit builds if // This variant has better performance than the regular one on 64-bit builds if
@ -247,7 +246,7 @@ GetDirectMapReservationStart(uintptr_t address,
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0); PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
return reservation_start; return reservation_start;
} }
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// Returns true if |address| is the beginning of the first super page of a // Returns true if |address| is the beginning of the first super page of a
// reservation, i.e. either a normal bucket super page, or the first super page // reservation, i.e. either a normal bucket super page, or the first super page

View File

@ -10,12 +10,11 @@
#include "base/allocator/partition_alloc_features.h" #include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/types/strong_alias.h" #include "base/types/strong_alias.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif #endif
@ -208,7 +207,7 @@ BASE_EXPORT void ConfigurePartitions(
AddDummyRefCount add_dummy_ref_count, AddDummyRefCount add_dummy_ref_count,
AlternateBucketDistribution use_alternate_bucket_distribution); AlternateBucketDistribution use_alternate_bucket_distribution);
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig); BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
#endif #endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@ -22,7 +22,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h" #include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h" #include "base/allocator/partition_allocator/partition_stats.h"
@ -713,7 +712,7 @@ void ConfigurePartitions(
} }
} }
#if PA_CONFIG(ALLOW_PCSCAN) #if BUILDFLAG(USE_STARSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) { void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
partition_alloc::internal::base::PlatformThread::SetThreadNameHook( partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&::base::PlatformThread::SetName); &::base::PlatformThread::SetName);
@ -730,7 +729,7 @@ void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled(); base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled(); base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
} }
#endif // PA_CONFIG(ALLOW_PCSCAN) #endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup. // Call this as soon as possible during startup.

View File

@ -30,13 +30,14 @@ ThreadSafePartitionRoot& PCScanMetadataAllocator() {
return *allocator; return *allocator;
} }
// TODO(tasak): investigate whether PartitionAlloc tests really need this
// function or not. If we found no tests need, remove it.
void ReinitPCScanMetadataAllocatorForTesting() { void ReinitPCScanMetadataAllocatorForTesting() {
// First, purge memory owned by PCScanMetadataAllocator. // First, purge memory owned by PCScanMetadataAllocator.
PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans | PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages); PurgeFlags::kDiscardUnusedSystemPages);
// Then, reinit the allocator. // Then, reinit the allocator.
PCScanMetadataAllocator().~PartitionRoot(); PCScanMetadataAllocator().ResetForTesting(true); // IN-TEST
memset(&PCScanMetadataAllocator(), 0, sizeof(PCScanMetadataAllocator()));
PCScanMetadataAllocator().Init(kConfig); PCScanMetadataAllocator().Init(kConfig);
} }

View File

@ -34,6 +34,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h" #include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h" #include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -613,14 +614,14 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr)); PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
// First, check if |maybe_ptr| points to a valid super page or a quarantined // First, check if |maybe_ptr| points to a valid super page or a quarantined
// card. // card.
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE) #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Check if |maybe_ptr| points to a quarantined card. // Check if |maybe_ptr| points to a quarantined card.
if (PA_LIKELY( if (PA_LIKELY(
!QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) { !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
return nullptr; return nullptr;
} }
#else #else // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Without the card table, use the reservation offset table to check if // Without the card table, use the reservation offset table to check if
// |maybe_ptr| points to a valid super-page. It's not as precise (meaning that // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
// we may have hit the slow path more frequently), but reduces the memory // we may have hit the slow path more frequently), but reduces the memory
@ -634,11 +635,11 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
return nullptr; return nullptr;
} }
#endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE) #endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
#else // PA_CONFIG(HAS_64_BITS_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) { if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
return nullptr; return nullptr;
} }
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// We are certain here that |maybe_ptr| points to an allocated super-page. // We are certain here that |maybe_ptr| points to an allocated super-page.
return StateBitmapFromAddr(maybe_ptr); return StateBitmapFromAddr(maybe_ptr);
@ -777,14 +778,14 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
size_t quarantine_size() const { return quarantine_size_; } size_t quarantine_size() const { return quarantine_size_; }
private: private:
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() { PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
return PartitionAddressSpace::RegularPoolBase(); return PartitionAddressSpace::RegularPoolBase();
} }
PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() { PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
return PartitionAddressSpace::RegularPoolBaseMask(); return PartitionAddressSpace::RegularPoolBaseMask();
} }
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) { PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
// |maybe_ptr| may have an MTE tag, so remove it first. // |maybe_ptr| may have an MTE tag, so remove it first.
@ -1289,7 +1290,7 @@ PCScanInternal::~PCScanInternal() = default;
void PCScanInternal::Initialize(PCScan::InitConfig config) { void PCScanInternal::Initialize(PCScan::InitConfig config) {
PA_DCHECK(!is_initialized_); PA_DCHECK(!is_initialized_);
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// Make sure that pools are initialized. // Make sure that pools are initialized.
PartitionAddressSpace::Init(); PartitionAddressSpace::Init();
#endif #endif

View File

@ -9,6 +9,7 @@
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h" #include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
@ -93,12 +94,12 @@ template <typename Derived>
void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) { void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
PA_SCAN_DCHECK(!(end % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the read value is a pointer into the PA region, it's likely // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency. // MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask; const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
const uintptr_t base = Derived::RegularPoolBase(); const uintptr_t base = Derived::RegularPoolBase();
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
for (; begin < end; begin += sizeof(uintptr_t)) { for (; begin < end; begin += sizeof(uintptr_t)) {
// Read the region word-by-word. Everything that we read is a potential // Read the region word-by-word. Everything that we read is a potential
// pointer to or inside an object on heap. Such an object should be // pointer to or inside an object on heap. Such an object should be
@ -106,13 +107,13 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
// //
// Keep it MTE-untagged. See DisableMTEScope for details. // Keep it MTE-untagged. See DisableMTEScope for details.
const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin); const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin);
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
if (PA_LIKELY((maybe_ptr & mask) != base)) if (PA_LIKELY((maybe_ptr & mask) != base))
continue; continue;
#else #else
if (!maybe_ptr) if (!maybe_ptr)
continue; continue;
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
derived().CheckPointer(maybe_ptr); derived().CheckPointer(maybe_ptr);
} }
} }

View File

@ -8,6 +8,7 @@
#include <limits> #include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -135,12 +136,12 @@ namespace {
} // namespace } // namespace
void Stack::IteratePointers(StackVisitor* visitor) const { void Stack::IteratePointers(StackVisitor* visitor) const {
#if defined(PA_PCSCAN_STACK_SUPPORTED) #if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl); PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by // No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration. // the regular conservative stack iteration.
IterateSafeStackIfNecessary(visitor); IterateSafeStackIfNecessary(visitor);
#endif #endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
} }
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -355,7 +355,7 @@ void ThreadCache::RemoveTombstoneForTesting() {
// static // static
void ThreadCache::Init(PartitionRoot<>* root) { void ThreadCache::Init(PartitionRoot<>* root) {
#if BUILDFLAG(IS_NACL) #if BUILDFLAG(IS_NACL)
PA_IMMEDIATE_CRASH(); static_assert(false, "PartitionAlloc isn't supported for NaCl");
#endif #endif
PA_CHECK(root->buckets[kBucketCount - 1].slot_size == PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
ThreadCache::kLargeSizeThreshold); ThreadCache::kLargeSizeThreshold);

View File

@ -27,7 +27,7 @@
#include "base/allocator/partition_allocator/partition_tls.h" #include "base/allocator/partition_allocator/partition_tls.h"
#include "build/build_config.h" #include "build/build_config.h"
#if defined(ARCH_CPU_X86_64) && PA_CONFIG(HAS_64_BITS_POINTERS) #if defined(ARCH_CPU_X86_64) && BUILDFLAG(HAS_64_BIT_POINTERS)
#include <algorithm> #include <algorithm>
#endif #endif
@ -43,13 +43,13 @@ namespace tools {
// //
// These two values were chosen randomly, and in particular neither is a valid // These two values were chosen randomly, and in particular neither is a valid
// pointer on most 64 bit architectures. // pointer on most 64 bit architectures.
#if PA_CONFIG(HAS_64_BITS_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63; constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf; constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
#else #else
constexpr uintptr_t kNeedle1 = 0xe69e32f3; constexpr uintptr_t kNeedle1 = 0xe69e32f3;
constexpr uintptr_t kNeedle2 = 0x9615ee1c; constexpr uintptr_t kNeedle2 = 0x9615ee1c;
#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// This array contains, in order: // This array contains, in order:
// - kNeedle1 // - kNeedle1
@ -161,14 +161,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
internal::base::TimeDelta periodic_purge_next_interval_ = internal::base::TimeDelta periodic_purge_next_interval_ =
kDefaultPurgeInterval; kDefaultPurgeInterval;
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
uint8_t largest_active_bucket_index_ = 1;
#else
uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex( uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
ThreadCacheLimits::kDefaultSizeThreshold); ThreadCacheLimits::kDefaultSizeThreshold);
#endif
}; };
constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default; constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
@ -392,15 +386,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size); void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);
static void SetGlobalLimits(PartitionRoot<>* root, float multiplier); static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
static constexpr uint16_t kBucketCount = 1;
#else
static constexpr uint16_t kBucketCount = static constexpr uint16_t kBucketCount =
internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) + internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
1; 1;
#endif
static_assert( static_assert(
kBucketCount < internal::kNumBuckets, kBucketCount < internal::kNumBuckets,
"Cannot have more cached buckets than what the allocator supports"); "Cannot have more cached buckets than what the allocator supports");
@ -547,7 +535,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
internal::PartitionFreelistEntry* entry = bucket.freelist_head; internal::PartitionFreelistEntry* entry = bucket.freelist_head;
// TODO(lizeb): Consider removing once crbug.com/1382658 is fixed. // TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \ #if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
PA_CONFIG(HAS_64_BITS_POINTERS) BUILDFLAG(HAS_64_BIT_POINTERS)
// x86_64 architecture now supports 57 bits of address space, as of Ice Lake // x86_64 architecture now supports 57 bits of address space, as of Ice Lake
// for Intel. However Chrome OS systems do not ship with kernel support for // for Intel. However Chrome OS systems do not ship with kernel support for
// it, but with 48 bits, so all canonical addresses have the upper 16 bits // it, but with 48 bits, so all canonical addresses have the upper 16 bits
@ -555,7 +543,8 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// by the kernel). // by the kernel).
constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1; constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask)); PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
#endif #endif // BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) &&
// BUILDFLAG(HAS_64_BIT_POINTERS)
// Passes the bucket size to |GetNext()|, so that in case of freelist // Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that lead to the crash, helping to // corruption, we know the bucket size that lead to the crash, helping to
@ -578,7 +567,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket, PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
uintptr_t slot_start) { uintptr_t slot_start) {
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \ #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
PA_CONFIG(HAS_64_BITS_POINTERS) BUILDFLAG(HAS_64_BIT_POINTERS)
// We see freelist corruption crashes happening in the wild. These are likely // We see freelist corruption crashes happening in the wild. These are likely
// due to out-of-bounds accesses in the previous slot, or to a Use-After-Free // due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
// somewhere in the code. // somewhere in the code.
@ -630,7 +619,7 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
address_aligned += 4; address_aligned += 4;
} }
#endif // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && #endif // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
// PA_CONFIG(HAS_64_BITS_POINTERS) // BUILDFLAG(HAS_64_BIT_POINTERS)
auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache( auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
slot_start, bucket.freelist_head); slot_start, bucket.freelist_head);

View File

@ -14,13 +14,12 @@
// other hyper-thread on this core. See the following for context: // other hyper-thread on this core. See the following for context:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops // https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
#if BUILDFLAG(IS_NACL) #if PA_CONFIG(IS_NONCLANG_MSVC)
// Inline assembly not allowed.
#define PA_YIELD_PROCESSOR ((void)0)
#elif PA_CONFIG(IS_NONCLANG_MSVC)
// MSVC is in its own assemblyless world (crbug.com/1351310#c6). // MSVC is in its own assemblyless world (crbug.com/1351310#c6).
#include <windows.h> #include <windows.h>
#define PA_YIELD_PROCESSOR (YieldProcessor()) #define PA_YIELD_PROCESSOR (YieldProcessor())
#else #else
#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86) #if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
@ -47,6 +46,6 @@
#define PA_YIELD_PROCESSOR ((void)0) #define PA_YIELD_PROCESSOR ((void)0)
#endif #endif
#endif // BUILDFLAG(IS_NACL) #endif // PA_CONFIG(IS_NONCLANG_MSVC)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_

View File

@ -48,12 +48,17 @@ source_set("jni_sample_native_side") {
] ]
} }
generate_jni_registration("jni_registration") {
targets = [ ":jni_sample_java" ]
manual_jni_registration = true
}
shared_library("jni_sample_lib") { shared_library("jni_sample_lib") {
sources = [ "sample_entry_point.cc" ] sources = [ "sample_entry_point.cc" ]
deps = [ deps = [
":jni_registration",
":jni_sample_native_side", ":jni_sample_native_side",
":sample_jni_apk__final_jni", # For registration_header
"//base", "//base",
] ]
} }
@ -63,7 +68,6 @@ android_apk("sample_jni_apk") {
android_manifest = "AndroidManifest.xml" android_manifest = "AndroidManifest.xml"
deps = [ ":jni_sample_java" ] deps = [ ":jni_sample_java" ]
shared_libraries = [ ":jni_sample_lib" ] shared_libraries = [ ":jni_sample_lib" ]
manual_jni_registration = true
} }
# Serves to test that generated bindings compile properly. # Serves to test that generated bindings compile properly.

View File

@ -175,6 +175,16 @@ public class AnimationFrameTimeHistogramTest {
If a native method is called without setting a mock in a unit test, an If a native method is called without setting a mock in a unit test, an
`UnsupportedOperationException` will be thrown. `UnsupportedOperationException` will be thrown.
#### Special case: DFMs
DFMs have their own generated `GEN_JNI`s, which are `<module_name>_GEN_JNI`. In
order to get your DFM's JNI to use the `<module_name>` prefix, you must add your
module name into the argument of the `@NativeMethods` annotation.
So, for example, say your module was named `test_module`. You would annotate
your `Natives` interface with `@NativeMethods("test_module")`, and this would
result in `test_module_GEN_JNI`.
### Testing for readiness: use `get()` ### Testing for readiness: use `get()`
JNI Generator automatically produces checks that verify that the Natives interface can be safely JNI Generator automatically produces checks that verify that the Natives interface can be safely

View File

@ -1,13 +1,10 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2012 The Chromium Authors # Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
"""Extracts native methods from a Java file and generates the JNI bindings. """Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests.""" If you change this, please run and update the tests."""
from __future__ import print_function
import argparse import argparse
import base64 import base64
import collections import collections
@ -62,7 +59,7 @@ _EXTRACT_METHODS_REGEX = re.compile(
flags=re.DOTALL) flags=re.DOTALL)
_NATIVE_PROXY_EXTRACTION_REGEX = re.compile( _NATIVE_PROXY_EXTRACTION_REGEX = re.compile(
r'@NativeMethods[\S\s]+?interface\s*' r'@NativeMethods(?:\(\s*"(?P<module_name>\w+)"\s*\))?[\S\s]+?interface\s*'
r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})') r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})')
# Use 100 columns rather than 80 because it makes many lines more readable. # Use 100 columns rather than 80 because it makes many lines more readable.
@ -842,9 +839,12 @@ class JNIFromJavaP(object):
self.constant_fields.append( self.constant_fields.append(
ConstantField(name=match.group('name'), value=value.group('value'))) ConstantField(name=match.group('name'), value=value.group('value')))
# We pass in an empty string for the module (which will make the JNI use the
# base module's files) for all javap-derived JNI. There may be a way to get
# the module from a jar file, but it's not needed right now.
self.inl_header_file_generator = InlHeaderFileGenerator( self.inl_header_file_generator = InlHeaderFileGenerator(
self.namespace, self.fully_qualified_class, [], self.called_by_natives, '', self.namespace, self.fully_qualified_class, [],
self.constant_fields, self.jni_params, options) self.called_by_natives, self.constant_fields, self.jni_params, options)
def GetContent(self): def GetContent(self):
return self.inl_header_file_generator.GetContent() return self.inl_header_file_generator.GetContent()
@ -875,17 +875,21 @@ class ProxyHelpers(object):
MAX_CHARS_FOR_HASHED_NATIVE_METHODS = 8 MAX_CHARS_FOR_HASHED_NATIVE_METHODS = 8
@staticmethod @staticmethod
def GetClass(use_hash): def GetClass(short_name, name_prefix=None):
return 'N' if use_hash else 'GEN_JNI' if not name_prefix:
name_prefix = ''
else:
name_prefix += '_'
return name_prefix + ('N' if short_name else 'GEN_JNI')
@staticmethod @staticmethod
def GetPackage(use_hash): def GetPackage(short_name):
return 'J' if use_hash else 'org/chromium/base/natives' return 'J' if short_name else 'org/chromium/base/natives'
@staticmethod @staticmethod
def GetQualifiedClass(use_hash): def GetQualifiedClass(short_name, name_prefix=None):
return '%s/%s' % (ProxyHelpers.GetPackage(use_hash), return '%s/%s' % (ProxyHelpers.GetPackage(short_name),
ProxyHelpers.GetClass(use_hash)) ProxyHelpers.GetClass(short_name, name_prefix))
@staticmethod @staticmethod
def CreateHashedMethodName(fully_qualified_class_name, method_name): def CreateHashedMethodName(fully_qualified_class_name, method_name):
@ -934,8 +938,18 @@ class ProxyHelpers(object):
ptr_type, ptr_type,
include_test_only=True): include_test_only=True):
methods = [] methods = []
first_match = True
module_name = None
for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents): for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents):
interface_body = match.group('interface_body') interface_body = match.group('interface_body')
if first_match:
module_name = match.group('module_name')
first_match = False
else:
assert module_name == match.group(
'module_name'
), 'JNI cannot belong to two modules in one file {} and {}'.format(
module_name, match.group('module_name'))
for method in _EXTRACT_METHODS_REGEX.finditer(interface_body): for method in _EXTRACT_METHODS_REGEX.finditer(interface_body):
name = method.group('name') name = method.group('name')
if not include_test_only and _NameIsTestOnly(name): if not include_test_only and _NameIsTestOnly(name):
@ -961,7 +975,9 @@ class ProxyHelpers(object):
ptr_type=ptr_type) ptr_type=ptr_type)
methods.append(native) methods.append(native)
return methods if not module_name:
module_name = ''
return methods, module_name
class JNIFromJavaSource(object): class JNIFromJavaSource(object):
@ -972,20 +988,19 @@ class JNIFromJavaSource(object):
self.jni_params = JniParams(fully_qualified_class) self.jni_params = JniParams(fully_qualified_class)
self.jni_params.ExtractImportsAndInnerClasses(contents) self.jni_params.ExtractImportsAndInnerClasses(contents)
jni_namespace = ExtractJNINamespace(contents) or options.namespace jni_namespace = ExtractJNINamespace(contents) or options.namespace
natives = ExtractNatives(contents, options.ptr_type)
called_by_natives = ExtractCalledByNatives(self.jni_params, contents, called_by_natives = ExtractCalledByNatives(self.jni_params, contents,
options.always_mangle) options.always_mangle)
natives += ProxyHelpers.ExtractStaticProxyNatives(fully_qualified_class, natives, module_name = ProxyHelpers.ExtractStaticProxyNatives(
contents, fully_qualified_class, contents, options.ptr_type)
options.ptr_type) natives += ExtractNatives(contents, options.ptr_type)
if len(natives) == 0 and len(called_by_natives) == 0: if len(natives) == 0 and len(called_by_natives) == 0:
raise SyntaxError( raise SyntaxError(
'Unable to find any JNI methods for %s.' % fully_qualified_class) 'Unable to find any JNI methods for %s.' % fully_qualified_class)
inl_header_file_generator = InlHeaderFileGenerator( inl_header_file_generator = InlHeaderFileGenerator(
jni_namespace, fully_qualified_class, natives, called_by_natives, [], module_name, jni_namespace, fully_qualified_class, natives,
self.jni_params, options) called_by_natives, [], self.jni_params, options)
self.content = inl_header_file_generator.GetContent() self.content = inl_header_file_generator.GetContent()
def GetContent(self): def GetContent(self):
@ -1005,11 +1020,13 @@ class HeaderFileGeneratorHelper(object):
def __init__(self, def __init__(self,
class_name, class_name,
module_name,
fully_qualified_class, fully_qualified_class,
use_proxy_hash, use_proxy_hash,
split_name=None, split_name=None,
enable_jni_multiplexing=False): enable_jni_multiplexing=False):
self.class_name = class_name self.class_name = class_name
self.module_name = module_name
self.fully_qualified_class = fully_qualified_class self.fully_qualified_class = fully_qualified_class
self.use_proxy_hash = use_proxy_hash self.use_proxy_hash = use_proxy_hash
self.split_name = split_name self.split_name = split_name
@ -1031,8 +1048,8 @@ class HeaderFileGeneratorHelper(object):
method_name = EscapeClassName(native.proxy_name) method_name = EscapeClassName(native.proxy_name)
return 'Java_%s_%s' % (EscapeClassName( return 'Java_%s_%s' % (EscapeClassName(
ProxyHelpers.GetQualifiedClass( ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash self.use_proxy_hash or self.enable_jni_multiplexing,
or self.enable_jni_multiplexing)), method_name) self.module_name)), method_name)
template = Template('Java_${JAVA_NAME}_native${NAME}') template = Template('Java_${JAVA_NAME}_native${NAME}')
@ -1047,9 +1064,9 @@ class HeaderFileGeneratorHelper(object):
ret = collections.OrderedDict() ret = collections.OrderedDict()
for entry in origin: for entry in origin:
if isinstance(entry, NativeMethod) and entry.is_proxy: if isinstance(entry, NativeMethod) and entry.is_proxy:
use_hash = self.use_proxy_hash or self.enable_jni_multiplexing short_name = self.use_proxy_hash or self.enable_jni_multiplexing
ret[ProxyHelpers.GetClass(use_hash)] \ ret[ProxyHelpers.GetClass(short_name, self.module_name)] \
= ProxyHelpers.GetQualifiedClass(use_hash) = ProxyHelpers.GetQualifiedClass(short_name, self.module_name)
continue continue
ret[self.class_name] = self.fully_qualified_class ret[self.class_name] = self.fully_qualified_class
@ -1083,7 +1100,8 @@ const char kClassPath_${JAVA_CLASS}[] = \
# Since all proxy methods use the same class, defining this in every # Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations. # header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass( if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing): self.use_proxy_hash or self.enable_jni_multiplexing,
self.module_name):
ret += [template.substitute(values)] ret += [template.substitute(values)]
class_getter = """\ class_getter = """\
@ -1115,7 +1133,8 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
# Since all proxy methods use the same class, defining this in every # Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations. # header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass( if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing): self.use_proxy_hash or self.enable_jni_multiplexing,
self.module_name):
ret += [template.substitute(values)] ret += [template.substitute(values)]
return ''.join(ret) return ''.join(ret)
@ -1124,7 +1143,7 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
class InlHeaderFileGenerator(object): class InlHeaderFileGenerator(object):
"""Generates an inline header file for JNI integration.""" """Generates an inline header file for JNI integration."""
def __init__(self, namespace, fully_qualified_class, natives, def __init__(self, module_name, namespace, fully_qualified_class, natives,
called_by_natives, constant_fields, jni_params, options): called_by_natives, constant_fields, jni_params, options):
self.namespace = namespace self.namespace = namespace
self.fully_qualified_class = fully_qualified_class self.fully_qualified_class = fully_qualified_class
@ -1137,6 +1156,7 @@ class InlHeaderFileGenerator(object):
self.options = options self.options = options
self.helper = HeaderFileGeneratorHelper( self.helper = HeaderFileGeneratorHelper(
self.class_name, self.class_name,
module_name,
fully_qualified_class, fully_qualified_class,
self.options.use_proxy_hash, self.options.use_proxy_hash,
split_name=self.options.split_name, split_name=self.options.split_name,

View File

@ -12,14 +12,17 @@ file.
""" """
import collections import collections
import copy
import difflib import difflib
import inspect import inspect
import optparse import optparse
import os import os
import sys import sys
import tempfile
import unittest import unittest
import jni_generator import jni_generator
import jni_registration_generator import jni_registration_generator
import zipfile
from jni_generator import CalledByNative from jni_generator import CalledByNative
from jni_generator import IsMainDexJavaClass from jni_generator import IsMainDexJavaClass
from jni_generator import NativeMethod from jni_generator import NativeMethod
@ -44,7 +47,7 @@ def _RemoveHashedNames(natives):
return ret return ret
class TestOptions(object): class JniGeneratorOptions(object):
"""The mock options object which is passed to the jni_generator.py script.""" """The mock options object which is passed to the jni_generator.py script."""
def __init__(self): def __init__(self):
@ -54,7 +57,6 @@ class TestOptions(object):
self.ptr_type = 'long' self.ptr_type = 'long'
self.cpp = 'cpp' self.cpp = 'cpp'
self.javap = 'mock-javap' self.javap = 'mock-javap'
self.native_exports_optional = True
self.enable_profiling = False self.enable_profiling = False
self.enable_tracing = False self.enable_tracing = False
self.use_proxy_hash = False self.use_proxy_hash = False
@ -65,6 +67,21 @@ class TestOptions(object):
self.include_test_only = True self.include_test_only = True
class JniRegistrationGeneratorOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
self.sources_exclusions = []
self.namespace = None
self.enable_proxy_mocks = False
self.require_mocks = False
self.use_proxy_hash = False
self.enable_jni_multiplexing = False
self.manual_jni_registration = False
self.include_test_only = False
self.header_path = None
class BaseTest(unittest.TestCase): class BaseTest(unittest.TestCase):
@staticmethod @staticmethod
@ -100,10 +117,42 @@ class BaseTest(unittest.TestCase):
signature_to_cases[signature].extend(cases) signature_to_cases[signature].extend(cases)
combined_dict[ combined_dict[
'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls( 'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls(
signature_to_cases, namespace) signature_to_cases, '')
return combined_dict return combined_dict
def _TestEndToEndRegistration(self,
input_java_src_files,
options,
name_to_goldens,
header_golden=None):
with tempfile.TemporaryDirectory() as tdir:
options.srcjar_path = os.path.join(tdir, 'srcjar.jar')
if header_golden:
options.header_path = os.path.join(tdir, 'header.h')
input_java_paths = [
self._JoinScriptDir(os.path.join(_JAVA_SRC_DIR, f))
for f in input_java_src_files
]
jni_registration_generator._Generate(options, input_java_paths)
with zipfile.ZipFile(options.srcjar_path, 'r') as srcjar:
for name in srcjar.namelist():
self.assertTrue(
name in name_to_goldens,
f'Found {name} output, but not present in name_to_goldens map.')
contents = srcjar.read(name).decode('utf-8')
self.AssertGoldenTextEquals(contents,
golden_file=name_to_goldens[name])
if header_golden:
with open(options.header_path, 'r') as f:
# Temp directory will cause some diffs each time we run if we don't
# normalize.
contents = f.read().replace(
tdir.replace('/', '_').upper(), 'TEMP_DIR')
self.AssertGoldenTextEquals(contents, golden_file=header_golden)
def _JoinScriptDir(self, path): def _JoinScriptDir(self, path):
script_dir = os.path.dirname(sys.argv[0]) script_dir = os.path.dirname(sys.argv[0])
return os.path.join(script_dir, path) return os.path.join(script_dir, path)
@ -123,7 +172,7 @@ class BaseTest(unittest.TestCase):
content = f.read() content = f.read()
opts = options opts = options
if opts is None: if opts is None:
opts = TestOptions() opts = JniGeneratorOptions()
jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz, jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz,
opts) opts)
@ -192,8 +241,8 @@ class BaseTest(unittest.TestCase):
if golden_file is None: if golden_file is None:
self.assertTrue( self.assertTrue(
caller.startswith('test'), caller.startswith('test'),
'AssertGoldenTextEquals can only be called from a ' 'AssertGoldenTextEquals can only be called without at golden file '
'test* method, not %s' % caller) 'from a test* method, not %s' % caller)
golden_file = '%s%s.golden' % (caller, suffix) golden_file = '%s%s.golden' % (caller, suffix)
golden_text = self._ReadGoldenFile(golden_file) golden_text = self._ReadGoldenFile(golden_file)
if os.environ.get(_REBASELINE_ENV): if os.environ.get(_REBASELINE_ENV):
@ -209,6 +258,7 @@ class BaseTest(unittest.TestCase):
self.AssertTextEquals(golden_text, generated_text) self.AssertTextEquals(golden_text, generated_text)
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class TestGenerator(BaseTest): class TestGenerator(BaseTest):
def testInspectCaller(self): def testInspectCaller(self):
@ -375,21 +425,21 @@ class TestGenerator(BaseTest):
java_class_name=None) java_class_name=None)
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
h1 = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', h1 = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(h1.GetContent()) self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator('', h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(),
'', '', '',
'org/chromium/TestJni', 'org/chromium/TestJni',
natives, natives, jni_params,
jni_params, True)
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True), reg_options, '', content),
suffix='Registrations') suffix='Registrations')
def testInnerClassNatives(self): def testInnerClassNatives(self):
@ -410,9 +460,9 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesMultiple(self): def testInnerClassNativesMultiple(self):
@ -443,9 +493,9 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesBothInnerAndOuter(self): def testInnerClassNativesBothInnerAndOuter(self):
@ -475,22 +525,22 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
h2 = jni_registration_generator.HeaderGenerator('', h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(),
'', '', '',
'org/chromium/TestJni', 'org/chromium/TestJni',
natives, natives, jni_params,
jni_params, True)
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True), reg_options, '', content),
suffix='Registrations') suffix='Registrations')
def testCalledByNatives(self): def testCalledByNatives(self):
@ -839,9 +889,9 @@ class TestGenerator(BaseTest):
), ),
] ]
self.AssertListEquals(golden_called_by_natives, called_by_natives) self.AssertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [], h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', [],
called_by_natives, [], jni_params, called_by_natives, [], jni_params,
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testCalledByNativeParseError(self): def testCalledByNativeParseError(self):
@ -938,8 +988,8 @@ public abstract class java.util.HashSet<T> extends java.util.AbstractSet<E>
Signature: ([Landroid/icu/text/DisplayContext;)V Signature: ([Landroid/icu/text/DisplayContext;)V
} }
""" """
jni_from_javap = jni_generator.JNIFromJavaP( jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
contents.split('\n'), TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testSnippnetJavap6_7_8(self): def testSnippnetJavap6_7_8(self):
@ -964,12 +1014,12 @@ public class java.util.HashSet {
} }
""" """
jni_from_javap6 = jni_generator.JNIFromJavaP( jni_from_javap6 = jni_generator.JNIFromJavaP(content_javap6.split('\n'),
content_javap6.split('\n'), TestOptions()) JniGeneratorOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP( jni_from_javap7 = jni_generator.JNIFromJavaP(content_javap7.split('\n'),
content_javap7.split('\n'), TestOptions()) JniGeneratorOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP( jni_from_javap8 = jni_generator.JNIFromJavaP(content_javap8.split('\n'),
content_javap8.split('\n'), TestOptions()) JniGeneratorOptions())
self.assertTrue(jni_from_javap6.GetContent()) self.assertTrue(jni_from_javap6.GetContent())
self.assertTrue(jni_from_javap7.GetContent()) self.assertTrue(jni_from_javap7.GetContent())
self.assertTrue(jni_from_javap8.GetContent()) self.assertTrue(jni_from_javap8.GetContent())
@ -983,16 +1033,16 @@ public class java.util.HashSet {
def testFromJavaP(self): def testFromJavaP(self):
contents = self._ReadGoldenFile('testInputStream.javap') contents = self._ReadGoldenFile('testInputStream.javap')
jni_from_javap = jni_generator.JNIFromJavaP( jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
contents.split('\n'), TestOptions()) JniGeneratorOptions())
self.assertEqual(10, len(jni_from_javap.called_by_natives)) self.assertEqual(10, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testConstantsFromJavaP(self): def testConstantsFromJavaP(self):
for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']: for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']:
contents = self._ReadGoldenFile(f) contents = self._ReadGoldenFile(f)
jni_from_javap = jni_generator.JNIFromJavaP( jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
contents.split('\n'), TestOptions()) JniGeneratorOptions())
self.assertEqual(86, len(jni_from_javap.called_by_natives)) self.assertEqual(86, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
@ -1013,8 +1063,8 @@ public class java.util.HashSet {
private native void nativeSyncSetupEnded( private native void nativeSyncSetupEnded(
int nativeAndroidSyncSetupFlowHandler); int nativeAndroidSyncSetupFlowHandler);
""" """
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(test_data, 'foo/bar',
test_data, 'foo/bar', TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testRaisesOnNonJNIMethod(self): def testRaisesOnNonJNIMethod(self):
@ -1025,7 +1075,7 @@ public class java.util.HashSet {
} }
""" """
self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data, self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data,
'foo/bar', TestOptions()) 'foo/bar', JniGeneratorOptions())
def testJniSelfDocumentingExample(self): def testJniSelfDocumentingExample(self):
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
@ -1045,7 +1095,7 @@ public class java.util.HashSet {
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, ('com/google/lookhowextremelylongiam/snarf/' test_data, ('com/google/lookhowextremelylongiam/snarf/'
'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'), 'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'),
TestOptions()) JniGeneratorOptions())
jni_lines = jni_from_java.GetContent().split('\n') jni_lines = jni_from_java.GetContent().split('\n')
line = next( line = next(
line for line in jni_lines if line.lstrip().startswith('#ifndef')) line for line in jni_lines if line.lstrip().startswith('#ifndef'))
@ -1113,7 +1163,7 @@ class Foo {
jni_params.JavaToJni('java/nio/ByteBuffer[]')) jni_params.JavaToJni('java/nio/ByteBuffer[]'))
def testNativesLong(self): def testNativesLong(self):
test_options = TestOptions() test_options = JniGeneratorOptions()
test_options.ptr_type = 'long' test_options.ptr_type = 'long'
test_data = """" test_data = """"
private native void nativeDestroy(long nativeChromeBrowserProvider); private native void nativeDestroy(long nativeChromeBrowserProvider);
@ -1131,8 +1181,9 @@ class Foo {
ptr_type=test_options.ptr_type), ptr_type=test_options.ptr_type),
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator( h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
'', 'org/chromium/TestJni', natives, [], [], jni_params, test_options) natives, [], [], jni_params,
test_options)
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testMainDexAnnotation(self): def testMainDexAnnotation(self):
@ -1210,8 +1261,7 @@ class Foo {
} }
} }
""" """
options = TestOptions() options = JniGeneratorOptions()
options.native_exports_optional = False
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/example/jni_generator/SampleForTests', options) test_data, 'org/chromium/example/jni_generator/SampleForTests', options)
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
@ -1229,7 +1279,7 @@ class Foo {
def willRaise(): def willRaise():
jni_generator.JNIFromJavaSource(test_data, jni_generator.JNIFromJavaSource(test_data,
'org/chromium/media/VideoCaptureFactory', 'org/chromium/media/VideoCaptureFactory',
TestOptions()) JniGeneratorOptions())
self.assertRaises(SyntaxError, willRaise) self.assertRaises(SyntaxError, willRaise)
@ -1249,7 +1299,7 @@ class Foo {
""" """
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testMultipleJNIAdditionalImport(self): def testMultipleJNIAdditionalImport(self):
@ -1270,7 +1320,7 @@ class Foo {
""" """
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testTracing(self): def testTracing(self):
@ -1291,7 +1341,7 @@ class Foo {
static native void nativeStaticMethod(); static native void nativeStaticMethod();
} }
""" """
options_with_tracing = TestOptions() options_with_tracing = JniGeneratorOptions()
options_with_tracing.enable_tracing = True options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing) test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1314,11 +1364,11 @@ class Foo {
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
TestOptions()) JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testSplitNameExample(self): def testSplitNameExample(self):
opts = TestOptions() opts = JniGeneratorOptions()
opts.split_name = "sample" opts.split_name = "sample"
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'), os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'),
@ -1327,19 +1377,58 @@ class Foo {
generated_text, golden_file='SampleForTestsWithSplit_jni.golden') generated_text, golden_file='SampleForTestsWithSplit_jni.golden')
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class ProxyTestGenerator(BaseTest): class ProxyTestGenerator(BaseTest):
def _BuildRegDictFromSample(self, options=None): def _BuildRegDictFromSample(self):
if options is None:
options = TestOptions()
path = self._JoinScriptDir( path = self._JoinScriptDir(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')) os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'))
reg_dict = jni_registration_generator._DictForPath(path) reg_dict = jni_registration_generator._DictForPath(
JniRegistrationGeneratorOptions(), path)
reg_dict = self._MergeRegistrationForTests([reg_dict]) reg_dict = self._MergeRegistrationForTests([reg_dict])
return reg_dict return reg_dict
def testEndToEndProxyHashed(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testEndToEndManualRegistration(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.manual_jni_registration = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'SampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(
input_java_files,
options,
name_to_goldens,
header_golden='SampleForAnnotationProcessorManualJni.golden')
def testEndToEndProxyJniWithModules(self):
input_java_files = [
'SampleForAnnotationProcessor.java', 'SampleModule.java'
]
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden',
'org/chromium/base/natives/module_GEN_JNI.java': 'ModuleGenJni.golden',
'J/module_N.java': 'ModuleJN.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testProxyNativesWithNatives(self): def testProxyNativesWithNatives(self):
test_data = """ test_data = """
package org.chromium.foo; package org.chromium.foo;
@ -1362,7 +1451,7 @@ class ProxyTestGenerator(BaseTest):
} }
""" """
options_with_tracing = TestOptions() options_with_tracing = JniGeneratorOptions()
options_with_tracing.enable_tracing = True options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing) test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1380,7 +1469,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
golden_natives = [ golden_natives = [
@ -1416,7 +1505,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', True) qualified_clazz, test_data, 'long', True)
golden_natives = [ golden_natives = [
@ -1452,7 +1541,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', False) qualified_clazz, test_data, 'long', False)
self.AssertListEquals(_RemoveHashedNames(natives), []) self.AssertListEquals(_RemoveHashedNames(natives), [])
@ -1481,9 +1570,10 @@ class ProxyTestGenerator(BaseTest):
} }
""" """
qualified_clazz = 'test/foo/Foo' qualified_clazz = 'test/foo/Foo'
jni_params = TestOptions() options = JniRegistrationGeneratorOptions()
options.manual_jni_registration = True
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
golden_natives = [ golden_natives = [
@ -1500,42 +1590,33 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(_RemoveHashedNames(natives), golden_natives) self.AssertListEquals(_RemoveHashedNames(natives), golden_natives)
jni_params = jni_generator.JniParams(qualified_clazz) jni_params = jni_generator.JniParams(qualified_clazz)
main_dex_header = jni_registration_generator.HeaderGenerator( main_dex_header = jni_registration_generator.DictionaryGenerator(
'', options, '', '', qualified_clazz, natives, jni_params,
'', main_dex=True).Generate()
qualified_clazz,
natives,
jni_params,
main_dex=True,
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header]) content = TestGenerator._MergeRegistrationForTests([main_dex_header])
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, jni_registration_generator.CreateFromDict(options, '', content))
use_hash=False,
manual_jni_registration=True))
other_qualified_clazz = 'test/foo/Bar' other_qualified_clazz = 'test/foo/Bar'
other_natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( other_natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
other_qualified_clazz, non_main_dex_test_data, 'long') other_qualified_clazz, non_main_dex_test_data, 'long')
jni_params = jni_generator.JniParams(other_qualified_clazz) jni_params = jni_generator.JniParams(other_qualified_clazz)
non_main_dex_header = jni_registration_generator.HeaderGenerator( non_main_dex_header = jni_registration_generator.DictionaryGenerator(
options,
'', '',
'', '',
other_qualified_clazz, other_qualified_clazz,
other_natives, other_natives,
jni_params, jni_params,
main_dex=False, main_dex=False).Generate()
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header] + content = TestGenerator._MergeRegistrationForTests([main_dex_header] +
[non_main_dex_header]) [non_main_dex_header])
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, jni_registration_generator.CreateFromDict(options, '', content),
use_hash=False,
manual_jni_registration=True),
'AndNonMainDex') 'AndNonMainDex')
def testProxyNatives(self): def testProxyNatives(self):
@ -1575,9 +1656,9 @@ class ProxyTestGenerator(BaseTest):
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
bad_spacing_natives = jni_generator.ProxyHelpers \ bad_spacing_natives, _ = jni_generator.ProxyHelpers \
.ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long') .ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long')
golden_natives = [ golden_natives = [
NativeMethod( NativeMethod(
@ -1616,34 +1697,32 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives)) self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
self.AssertListEquals(golden_natives, self.AssertListEquals(golden_natives,
_RemoveHashedNames(bad_spacing_natives)) _RemoveHashedNames(bad_spacing_natives))
options = JniGeneratorOptions()
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
jni_params = jni_generator.JniParams(qualified_clazz) jni_params = jni_generator.JniParams(qualified_clazz)
h1 = jni_generator.InlHeaderFileGenerator('', qualified_clazz, natives, [], h1 = jni_generator.InlHeaderFileGenerator('', '', qualified_clazz, natives,
[], jni_params, TestOptions()) [], [], jni_params, options)
self.AssertGoldenTextEquals(h1.GetContent()) self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator('', h2 = jni_registration_generator.DictionaryGenerator(reg_options, '', '',
'', qualified_clazz,
qualified_clazz, natives, jni_params,
natives, False)
jni_params,
False,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
proxy_opts = jni_registration_generator.ProxyOptions(
manual_jni_registration=True)
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(content, proxy_opts), jni_registration_generator.CreateProxyJavaFromDict(
reg_options, '', content),
suffix='Java') suffix='Java')
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, reg_options, '', content),
proxy_opts.use_hash,
manual_jni_registration=proxy_opts.manual_jni_registration),
suffix='Registrations') suffix='Registrations')
def testProxyHashedExample(self): def testProxyHashedExample(self):
opts = TestOptions() opts = JniGeneratorOptions()
opts.use_proxy_hash = True opts.use_proxy_hash = True
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
@ -1654,20 +1733,6 @@ class ProxyTestGenerator(BaseTest):
generated_text, generated_text,
golden_file='HashedSampleForAnnotationProcessor_jni.golden') golden_file='HashedSampleForAnnotationProcessor_jni.golden')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), use_proxy_hash=True)
reg_dict = self._MergeRegistrationForTests([reg_dict])
proxy_opts = jni_registration_generator.ProxyOptions(use_hash=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
golden_file='HashedSampleForAnnotationProcessorGenJni.golden')
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts, forwarding=True),
golden_file='HashedSampleForAnnotationProcessorGenJni.2.golden')
def testProxyJniExample(self): def testProxyJniExample(self):
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'), os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'),
@ -1676,21 +1741,20 @@ class ProxyTestGenerator(BaseTest):
generated_text, golden_file='SampleForAnnotationProcessor_jni.golden') generated_text, golden_file='SampleForAnnotationProcessor_jni.golden')
def testGenJniFlags(self): def testGenJniFlags(self):
options = JniRegistrationGeneratorOptions()
reg_dict = self._BuildRegDictFromSample() reg_dict = self._BuildRegDictFromSample()
proxy_options = jni_registration_generator.ProxyOptions()
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options) options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'Disabled') self.AssertGoldenTextEquals(content, 'Disabled')
proxy_options = jni_registration_generator.ProxyOptions(enable_mocks=True) options.enable_proxy_mocks = True
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options) options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'MocksEnabled') self.AssertGoldenTextEquals(content, 'MocksEnabled')
proxy_options = jni_registration_generator.ProxyOptions( options.require_mocks = True
enable_mocks=True, require_mocks=True)
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options) options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'MocksRequired') self.AssertGoldenTextEquals(content, 'MocksRequired')
def testProxyTypeInfoPreserved(self): def testProxyTypeInfoPreserved(self):
@ -1708,8 +1772,8 @@ class ProxyTestGenerator(BaseTest):
SomeJavaType[][] someObjects); SomeJavaType[][] someObjects);
} }
""" """
natives = ProxyHelpers.ExtractStaticProxyNatives('org/chromium/foo/FooJni', natives, _ = ProxyHelpers.ExtractStaticProxyNatives(
test_data, 'long') 'org/chromium/foo/FooJni', test_data, 'long')
golden_natives = [ golden_natives = [
NativeMethod( NativeMethod(
static=True, static=True,
@ -1758,63 +1822,53 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives)) self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class MultiplexTestGenerator(BaseTest): class MultiplexTestGenerator(BaseTest):
options = JniRegistrationGeneratorOptions()
options.enable_jni_multiplexing = True
def testProxyMultiplexGenJni(self): def testProxyMultiplexGenJni(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath( reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), self.options, self._JoinScriptDir(path))
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict], reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True) enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict( jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts), self.options, '', reg_dict),
golden_file='testProxyMultiplexGenJni.golden') golden_file='testProxyMultiplexGenJni.golden')
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(reg_dict, jni_registration_generator.CreateProxyJavaFromDict(self.options,
proxy_opts, '',
reg_dict,
forwarding=True), forwarding=True),
golden_file='testProxyMultiplexGenJni.2.golden') golden_file='testProxyMultiplexGenJni.2.golden')
def testProxyMultiplexNatives(self): def testProxyMultiplexNatives(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath( reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), self.options, self._JoinScriptDir(path))
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict], reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True) enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_dict, self.options, '', reg_dict),
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing),
golden_file='testProxyMultiplexNatives.golden') golden_file='testProxyMultiplexNatives.golden')
def testProxyMultiplexNativesRegistration(self): def testProxyMultiplexNativesRegistration(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict_for_registration = jni_registration_generator._DictForPath( reg_dict_for_registration = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), self.options, self._JoinScriptDir(path))
enable_jni_multiplexing=True,
namespace='test')
reg_dict_for_registration = self._MergeRegistrationForTests( reg_dict_for_registration = self._MergeRegistrationForTests(
[reg_dict_for_registration], enable_jni_multiplexing=True) [reg_dict_for_registration], enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions( new_options = copy.copy(self.options)
enable_jni_multiplexing=True) new_options.manual_jni_registration = True
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict( jni_registration_generator.CreateFromDict(new_options, '',
reg_dict_for_registration, reg_dict_for_registration),
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=True),
golden_file='testProxyMultiplexNativesRegistration.golden') golden_file='testProxyMultiplexNativesRegistration.golden')

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# Copyright 2017 The Chromium Authors # Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
@ -37,12 +37,7 @@ MERGEABLE_KEYS = [
] ]
def _Generate(java_file_paths, def _Generate(options, java_file_paths):
srcjar_path,
proxy_opts,
header_path=None,
namespace='',
include_test_only=True):
"""Generates files required to perform JNI registration. """Generates files required to perform JNI registration.
Generates a srcjar containing a single class, GEN_JNI, that contains all Generates a srcjar containing a single class, GEN_JNI, that contains all
@ -53,92 +48,92 @@ def _Generate(java_file_paths,
JNI registration. JNI registration.
Args: Args:
options: arguments from the command line
java_file_paths: A list of java file paths. java_file_paths: A list of java file paths.
srcjar_path: Path to the GEN_JNI srcjar.
header_path: If specified, generates a header file in this location.
namespace: If specified, sets the namespace for the generated header file.
""" """
# Without multiprocessing, script takes ~13 seconds for chrome_public_apk # Without multiprocessing, script takes ~13 seconds for chrome_public_apk
# on a z620. With multiprocessing, takes ~2 seconds. # on a z620. With multiprocessing, takes ~2 seconds.
results = [] results = collections.defaultdict(list)
with multiprocessing.Pool() as pool: with multiprocessing.Pool() as pool:
for d in pool.imap_unordered( for d in pool.imap_unordered(functools.partial(_DictForPath, options),
functools.partial( java_file_paths):
_DictForPath,
use_proxy_hash=proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
namespace=namespace,
include_test_only=include_test_only), java_file_paths):
if d: if d:
results.append(d) results[d['MODULE_NAME']].append(d)
# Sort to make output deterministic. combined_dicts = collections.defaultdict(dict)
results.sort(key=lambda d: d['FULL_CLASS_NAME']) for module_name, module_results in results.items():
# Sort to make output deterministic.
module_results.sort(key=lambda d: d['FULL_CLASS_NAME'])
combined_dict = combined_dicts[module_name]
for key in MERGEABLE_KEYS:
combined_dict[key] = ''.join(d.get(key, '') for d in module_results)
combined_dict = {} # PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have
for key in MERGEABLE_KEYS: # duplicates for JNI multiplexing since all native methods with similar
combined_dict[key] = ''.join(d.get(key, '') for d in results) # signatures map to the same proxy. Similarly, there may be multiple switch
# PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have # case entries for the same proxy signatures.
# duplicates for JNI multiplexing since all native methods with similar if options.enable_jni_multiplexing:
# signatures map to the same proxy. Similarly, there may be multiple switch proxy_signatures_list = sorted(
# case entries for the same proxy signatures. set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
if proxy_opts.enable_jni_multiplexing: combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
proxy_signatures_list = sorted( signature for signature in proxy_signatures_list)
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
signature for signature in proxy_signatures_list)
proxy_native_array_list = sorted( proxy_native_array_list = sorted(
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split('},\n'))) set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split(
combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join( '},\n')))
p for p in proxy_native_array_list if p != '') + '}' combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join(
p for p in proxy_native_array_list if p != '') + '}'
signature_to_cases = collections.defaultdict(list) signature_to_cases = collections.defaultdict(list)
for d in results: for d in module_results:
for signature, cases in d['SIGNATURE_TO_CASES'].items(): for signature, cases in d['SIGNATURE_TO_CASES'].items():
signature_to_cases[signature].extend(cases) signature_to_cases[signature].extend(cases)
combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls( combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls(
signature_to_cases, namespace) signature_to_cases, module_name)
if options.header_path:
assert len(
combined_dicts) == 1, 'Cannot output a header for multiple modules'
module_name = next(iter(combined_dicts))
combined_dict = combined_dicts[module_name]
if header_path:
combined_dict['HEADER_GUARD'] = \ combined_dict['HEADER_GUARD'] = \
os.path.splitext(header_path)[0].replace('/', '_').upper() + '_' os.path.splitext(options.header_path)[0].replace('/', '_').replace('.', '_').upper() + '_'
combined_dict['NAMESPACE'] = namespace combined_dict['NAMESPACE'] = options.namespace
header_content = CreateFromDict( header_content = CreateFromDict(options, module_name, combined_dict)
combined_dict, with build_utils.AtomicOutput(options.header_path, mode='w') as f:
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=proxy_opts.manual_jni_registration)
with build_utils.AtomicOutput(header_path, mode='w') as f:
f.write(header_content) f.write(header_content)
with build_utils.AtomicOutput(srcjar_path) as f: with build_utils.AtomicOutput(options.srcjar_path) as f:
with zipfile.ZipFile(f, 'w') as srcjar: with zipfile.ZipFile(f, 'w') as srcjar:
if proxy_opts.use_hash or proxy_opts.enable_jni_multiplexing: for module_name, combined_dict in combined_dicts.items():
# J/N.java
build_utils.AddToZipHermetic( if options.use_proxy_hash or options.enable_jni_multiplexing:
srcjar, # J/N.java
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(True), build_utils.AddToZipHermetic(
data=CreateProxyJavaFromDict(combined_dict, proxy_opts)) srcjar,
# org/chromium/base/natives/GEN_JNI.java '%s.java' %
build_utils.AddToZipHermetic( jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name),
srcjar, data=CreateProxyJavaFromDict(options, module_name, combined_dict))
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False), # org/chromium/base/natives/GEN_JNI.java
data=CreateProxyJavaFromDict( build_utils.AddToZipHermetic(
combined_dict, proxy_opts, forwarding=True)) srcjar,
else: '%s.java' %
# org/chromium/base/natives/GEN_JNI.java jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name),
build_utils.AddToZipHermetic( data=CreateProxyJavaFromDict(options,
srcjar, module_name,
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False), combined_dict,
data=CreateProxyJavaFromDict(combined_dict, proxy_opts)) forwarding=True))
else:
# org/chromium/base/natives/GEN_JNI.java
build_utils.AddToZipHermetic(
srcjar,
'%s.java' %
jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name),
data=CreateProxyJavaFromDict(options, module_name, combined_dict))
def _DictForPath(path, def _DictForPath(options, path):
use_proxy_hash=False,
enable_jni_multiplexing=False,
namespace='',
include_test_only=True):
with open(path) as f: with open(path) as f:
contents = jni_generator.RemoveComments(f.read()) contents = jni_generator.RemoveComments(f.read())
if '@JniIgnoreNatives' in contents: if '@JniIgnoreNatives' in contents:
@ -146,13 +141,14 @@ def _DictForPath(path,
fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName( fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName(
path, contents) path, contents)
natives = jni_generator.ExtractNatives(contents, 'long')
natives += jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives, module_name = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
fully_qualified_class=fully_qualified_class, fully_qualified_class=fully_qualified_class,
contents=contents, contents=contents,
ptr_type='long', ptr_type='long',
include_test_only=include_test_only) include_test_only=options.include_test_only)
natives += jni_generator.ExtractNatives(contents, 'long')
if len(natives) == 0: if len(natives) == 0:
return None return None
# The namespace for the content is separate from the namespace for the # The namespace for the content is separate from the namespace for the
@ -161,19 +157,13 @@ def _DictForPath(path,
jni_params = jni_generator.JniParams(fully_qualified_class) jni_params = jni_generator.JniParams(fully_qualified_class)
jni_params.ExtractImportsAndInnerClasses(contents) jni_params.ExtractImportsAndInnerClasses(contents)
is_main_dex = jni_generator.IsMainDexJavaClass(contents) is_main_dex = jni_generator.IsMainDexJavaClass(contents)
header_generator = HeaderGenerator( dict_generator = DictionaryGenerator(options, module_name, content_namespace,
namespace, fully_qualified_class, natives,
content_namespace, jni_params, is_main_dex)
fully_qualified_class, return dict_generator.Generate()
natives,
jni_params,
is_main_dex,
use_proxy_hash,
enable_jni_multiplexing=enable_jni_multiplexing)
return header_generator.Generate()
def _AddForwardingCalls(signature_to_cases, namespace): def _AddForwardingCalls(signature_to_cases, module_name):
template = string.Template(""" template = string.Template("""
JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}( JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}(
JNIEnv* env, JNIEnv* env,
@ -199,7 +189,8 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
jni_generator.JavaDataTypeToC(return_type), jni_generator.JavaDataTypeToC(return_type),
'CLASS_NAME': 'CLASS_NAME':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(True) + namespace), jni_generator.ProxyHelpers.GetQualifiedClass(True,
module_name)),
'PROXY_SIGNATURE': 'PROXY_SIGNATURE':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list)), _GetMultiplexProxyName(return_type, params_list)),
@ -214,9 +205,7 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
return ''.join(s for s in switch_statements) return ''.join(s for s in switch_statements)
def _SetProxyRegistrationFields(registration_dict, use_hash, def _SetProxyRegistrationFields(options, module_name, registration_dict):
enable_jni_multiplexing,
manual_jni_registration):
registration_template = string.Template("""\ registration_template = string.Template("""\
static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = { static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = {
@ -279,20 +268,20 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
} // namespace ${NAMESPACE} } // namespace ${NAMESPACE}
""") """)
short_name = options.use_proxy_hash or options.enable_jni_multiplexing
sub_dict = { sub_dict = {
'ESCAPED_PROXY_CLASS': 'ESCAPED_PROXY_CLASS':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass( jni_generator.ProxyHelpers.GetQualifiedClass(short_name,
use_hash or enable_jni_multiplexing)), module_name)),
'PROXY_CLASS': 'PROXY_CLASS':
jni_generator.ProxyHelpers.GetQualifiedClass(use_hash jni_generator.ProxyHelpers.GetQualifiedClass(short_name, module_name),
or enable_jni_multiplexing),
'KMETHODS': 'KMETHODS':
registration_dict['PROXY_NATIVE_METHOD_ARRAY'], registration_dict['PROXY_NATIVE_METHOD_ARRAY'],
'REGISTRATION_NAME': 'REGISTRATION_NAME':
jni_generator.GetRegistrationFunctionName( jni_generator.GetRegistrationFunctionName(
jni_generator.ProxyHelpers.GetQualifiedClass( jni_generator.ProxyHelpers.GetQualifiedClass(short_name,
use_hash or enable_jni_multiplexing)), module_name)),
} }
if registration_dict['PROXY_NATIVE_METHOD_ARRAY']: if registration_dict['PROXY_NATIVE_METHOD_ARRAY']:
@ -316,14 +305,17 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration
registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call
if manual_jni_registration: if options.manual_jni_registration:
registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute( registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute(
registration_dict) registration_dict)
else: else:
registration_dict['MANUAL_REGISTRATION'] = '' registration_dict['MANUAL_REGISTRATION'] = ''
def CreateProxyJavaFromDict(registration_dict, proxy_opts, forwarding=False): def CreateProxyJavaFromDict(options,
module_name,
registration_dict,
forwarding=False):
template = string.Template("""\ template = string.Template("""\
// Copyright 2018 The Chromium Authors // Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
@ -341,19 +333,20 @@ ${METHODS}
} }
""") """)
is_natives_class = not forwarding and (proxy_opts.use_hash is_natives_class = not forwarding and (options.use_proxy_hash
or proxy_opts.enable_jni_multiplexing) or options.enable_jni_multiplexing)
class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class) class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class,
module_name)
package = jni_generator.ProxyHelpers.GetPackage(is_natives_class) package = jni_generator.ProxyHelpers.GetPackage(is_natives_class)
if forwarding or not (proxy_opts.use_hash if forwarding or not (options.use_proxy_hash
or proxy_opts.enable_jni_multiplexing): or options.enable_jni_multiplexing):
fields = string.Template("""\ fields = string.Template("""\
public static final boolean TESTING_ENABLED = ${TESTING_ENABLED}; public static final boolean TESTING_ENABLED = ${TESTING_ENABLED};
public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK}; public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK};
""").substitute({ """).substitute({
'TESTING_ENABLED': str(proxy_opts.enable_mocks).lower(), 'TESTING_ENABLED': str(options.enable_proxy_mocks).lower(),
'REQUIRE_MOCK': str(proxy_opts.require_mocks).lower(), 'REQUIRE_MOCK': str(options.require_mocks).lower(),
}) })
else: else:
fields = '' fields = ''
@ -371,10 +364,7 @@ ${METHODS}
}) })
def CreateFromDict(registration_dict, def CreateFromDict(options, module_name, registration_dict):
use_hash,
enable_jni_multiplexing=False,
manual_jni_registration=False):
"""Returns the content of the header file.""" """Returns the content of the header file."""
template = string.Template("""\ template = string.Template("""\
@ -408,9 +398,8 @@ ${FORWARDING_CALLS}
${MANUAL_REGISTRATION} ${MANUAL_REGISTRATION}
#endif // ${HEADER_GUARD} #endif // ${HEADER_GUARD}
""") """)
_SetProxyRegistrationFields(registration_dict, use_hash, _SetProxyRegistrationFields(options, module_name, registration_dict)
enable_jni_multiplexing, manual_jni_registration) if not options.enable_jni_multiplexing:
if not enable_jni_multiplexing:
registration_dict['FORWARDING_CALLS'] = '' registration_dict['FORWARDING_CALLS'] = ''
if len(registration_dict['FORWARD_DECLARATIONS']) == 0: if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
return '' return ''
@ -436,19 +425,13 @@ def _GetJavaToNativeParamsList(params_list):
return 'jlong switch_num, ' + ', '.join(params_in_stub) return 'jlong switch_num, ' + ', '.join(params_in_stub)
class HeaderGenerator(object): class DictionaryGenerator(object):
"""Generates an inline header file for JNI registration.""" """Generates an inline header file for JNI registration."""
def __init__(self, def __init__(self, options, module_name, content_namespace,
namespace, fully_qualified_class, natives, jni_params, main_dex):
content_namespace, self.options = options
fully_qualified_class, self.module_name = module_name
natives,
jni_params,
main_dex,
use_proxy_hash,
enable_jni_multiplexing=False):
self.namespace = namespace
self.content_namespace = content_namespace self.content_namespace = content_namespace
self.natives = natives self.natives = natives
self.proxy_natives = [n for n in natives if n.is_proxy] self.proxy_natives = [n for n in natives if n.is_proxy]
@ -459,15 +442,17 @@ class HeaderGenerator(object):
self.main_dex = main_dex self.main_dex = main_dex
self.helper = jni_generator.HeaderFileGeneratorHelper( self.helper = jni_generator.HeaderFileGeneratorHelper(
self.class_name, self.class_name,
self.module_name,
fully_qualified_class, fully_qualified_class,
use_proxy_hash, options.use_proxy_hash,
enable_jni_multiplexing=enable_jni_multiplexing) enable_jni_multiplexing=options.enable_jni_multiplexing)
self.use_proxy_hash = use_proxy_hash
self.enable_jni_multiplexing = enable_jni_multiplexing
self.registration_dict = None self.registration_dict = None
def Generate(self): def Generate(self):
self.registration_dict = {'FULL_CLASS_NAME': self.fully_qualified_class} self.registration_dict = {
'FULL_CLASS_NAME': self.fully_qualified_class,
'MODULE_NAME': self.module_name
}
self._AddClassPathDeclarations() self._AddClassPathDeclarations()
self._AddForwardDeclaration() self._AddForwardDeclaration()
self._AddJNINativeMethodsArrays() self._AddJNINativeMethodsArrays()
@ -476,19 +461,16 @@ class HeaderGenerator(object):
self._AddRegisterNativesFunctions() self._AddRegisterNativesFunctions()
self.registration_dict['PROXY_NATIVE_SIGNATURES'] = (''.join( self.registration_dict['PROXY_NATIVE_SIGNATURES'] = (''.join(
_MakeProxySignature( _MakeProxySignature(self.options, native)
native,
self.use_proxy_hash,
enable_jni_multiplexing=self.enable_jni_multiplexing)
for native in self.proxy_natives)) for native in self.proxy_natives))
if self.enable_jni_multiplexing:
if self.options.enable_jni_multiplexing:
self._AssignSwitchNumberToNatives() self._AssignSwitchNumberToNatives()
self._AddCases() self._AddCases()
if self.use_proxy_hash or self.enable_jni_multiplexing: if self.options.use_proxy_hash or self.options.enable_jni_multiplexing:
self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join( self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join(
_MakeForwardingProxy( _MakeForwardingProxy(self.options, self.module_name, native)
native, enable_jni_multiplexing=self.enable_jni_multiplexing)
for native in self.proxy_natives)) for native in self.proxy_natives))
return self.registration_dict return self.registration_dict
@ -582,10 +564,11 @@ ${KMETHODS}
if native.is_proxy: if native.is_proxy:
# Literal name of the native method in the class that contains the actual # Literal name of the native method in the class that contains the actual
# native declaration. # native declaration.
if self.enable_jni_multiplexing: if self.options.enable_jni_multiplexing:
return_type, params_list = native.return_and_signature return_type, params_list = native.return_and_signature
class_name = jni_generator.EscapeClassName( class_name = jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(True) + self.namespace) jni_generator.ProxyHelpers.GetQualifiedClass(
True, self.module_name))
proxy_signature = jni_generator.EscapeClassName( proxy_signature = jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list)) _GetMultiplexProxyName(return_type, params_list))
@ -594,7 +577,7 @@ ${KMETHODS}
[jni_generator.Param(datatype='long', name='switch_num')] + [jni_generator.Param(datatype='long', name='switch_num')] +
native.params, native.return_type) native.params, native.return_type)
stub_name = 'Java_' + class_name + '_' + proxy_signature stub_name = 'Java_' + class_name + '_' + proxy_signature
elif self.use_proxy_hash: elif self.options.use_proxy_hash:
name = native.hashed_proxy_name name = native.hashed_proxy_name
else: else:
name = native.proxy_name name = native.proxy_name
@ -608,7 +591,7 @@ ${KMETHODS}
def _AddProxyNativeMethodKStrings(self): def _AddProxyNativeMethodKStrings(self):
"""Returns KMethodString for wrapped native methods in all_classes """ """Returns KMethodString for wrapped native methods in all_classes """
if self.main_dex or self.enable_jni_multiplexing: if self.main_dex or self.options.enable_jni_multiplexing:
key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX' key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'
else: else:
key = 'PROXY_NATIVE_METHOD_ARRAY' key = 'PROXY_NATIVE_METHOD_ARRAY'
@ -618,7 +601,7 @@ ${KMETHODS}
self._SetDictValue(key, proxy_k_strings) self._SetDictValue(key, proxy_k_strings)
def _SubstituteNativeMethods(self, template, sub_proxy=False): def _SubstituteNativeMethods(self, template):
"""Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided """Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided
template.""" template."""
ret = [] ret = []
@ -626,10 +609,10 @@ ${KMETHODS}
all_classes[self.class_name] = self.fully_qualified_class all_classes[self.class_name] = self.fully_qualified_class
for clazz, full_clazz in all_classes.items(): for clazz, full_clazz in all_classes.items():
if not sub_proxy: if clazz == jni_generator.ProxyHelpers.GetClass(
if clazz == jni_generator.ProxyHelpers.GetClass( self.options.use_proxy_hash or self.options.enable_jni_multiplexing,
self.use_proxy_hash or self.enable_jni_multiplexing): self.module_name):
continue continue
kmethods = self._GetKMethodsString(clazz) kmethods = self._GetKMethodsString(clazz)
namespace_str = '' namespace_str = ''
@ -723,6 +706,8 @@ ${NATIVES}\
params = _GetParamsListForMultiplex(signature[1], with_types=False) params = _GetParamsListForMultiplex(signature[1], with_types=False)
values = { values = {
'SWITCH_NUM': native.switch_num, 'SWITCH_NUM': native.switch_num,
# We are forced to call the generated stub instead of the impl because
# the impl is not guaranteed to have a globally unique name.
'STUB_NAME': self.helper.GetStubName(native), 'STUB_NAME': self.helper.GetStubName(native),
'PARAMS': params, 'PARAMS': params,
} }
@ -778,7 +763,7 @@ def _GetMultiplexProxyName(return_type, params_list):
return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params
def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False): def _MakeForwardingProxy(options, module_name, proxy_native):
template = string.Template(""" template = string.Template("""
public static ${RETURN_TYPE} ${METHOD_NAME}(${PARAMS_WITH_TYPES}) { public static ${RETURN_TYPE} ${METHOD_NAME}(${PARAMS_WITH_TYPES}) {
${MAYBE_RETURN}${PROXY_CLASS}.${PROXY_METHOD_NAME}(${PARAM_NAMES}); ${MAYBE_RETURN}${PROXY_CLASS}.${PROXY_METHOD_NAME}(${PARAM_NAMES});
@ -787,9 +772,9 @@ def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
params_with_types = ', '.join( params_with_types = ', '.join(
'%s %s' % (p.datatype, p.name) for p in proxy_native.params) '%s %s' % (p.datatype, p.name) for p in proxy_native.params)
param_names = ', '.join(p.name for p in proxy_native.params) param_names = ', '.join(p.name for p in proxy_native.params)
proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True) proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name)
if enable_jni_multiplexing: if options.enable_jni_multiplexing:
if not param_names: if not param_names:
param_names = proxy_native.switch_num + 'L' param_names = proxy_native.switch_num + 'L'
else: else:
@ -817,15 +802,13 @@ def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
}) })
def _MakeProxySignature(proxy_native, def _MakeProxySignature(options, proxy_native):
use_proxy_hash,
enable_jni_multiplexing=False):
params_with_types = ', '.join('%s %s' % (p.datatype, p.name) params_with_types = ', '.join('%s %s' % (p.datatype, p.name)
for p in proxy_native.params) for p in proxy_native.params)
native_method_line = """ native_method_line = """
public static native ${RETURN} ${PROXY_NAME}(${PARAMS_WITH_TYPES});""" public static native ${RETURN} ${PROXY_NAME}(${PARAMS_WITH_TYPES});"""
if enable_jni_multiplexing: if options.enable_jni_multiplexing:
# This has to be only one line and without comments because all the proxy # This has to be only one line and without comments because all the proxy
# signatures will be joined, then split on new lines with duplicates removed # signatures will be joined, then split on new lines with duplicates removed
# since multiple |proxy_native|s map to the same multiplexed signature. # since multiple |proxy_native|s map to the same multiplexed signature.
@ -836,7 +819,7 @@ def _MakeProxySignature(proxy_native,
proxy_name = _GetMultiplexProxyName(return_type, params_list) proxy_name = _GetMultiplexProxyName(return_type, params_list)
params_with_types = 'long switch_num' + _GetParamsListForMultiplex( params_with_types = 'long switch_num' + _GetParamsListForMultiplex(
params_list, with_types=True) params_list, with_types=True)
elif use_proxy_hash: elif options.use_proxy_hash:
signature_template = string.Template(""" signature_template = string.Template("""
// Original name: ${ALT_NAME}""" + native_method_line) // Original name: ${ALT_NAME}""" + native_method_line)
@ -859,18 +842,6 @@ def _MakeProxySignature(proxy_native,
}) })
class ProxyOptions:
def __init__(self, **kwargs):
self.use_hash = kwargs.get('use_hash', False)
self.enable_jni_multiplexing = kwargs.get('enable_jni_multiplexing', False)
self.manual_jni_registration = kwargs.get('manual_jni_registration', False)
self.enable_mocks = kwargs.get('enable_mocks', False)
self.require_mocks = kwargs.get('require_mocks', False)
# Can never require and disable.
assert self.enable_mocks or not self.require_mocks
def main(argv): def main(argv):
arg_parser = argparse.ArgumentParser() arg_parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(arg_parser) build_utils.AddDepfileOption(arg_parser)
@ -888,64 +859,56 @@ def main(argv):
required=True, required=True,
help='Path to output srcjar for GEN_JNI.java (and J/N.java if proxy' help='Path to output srcjar for GEN_JNI.java (and J/N.java if proxy'
' hash is enabled).') ' hash is enabled).')
arg_parser.add_argument( arg_parser.add_argument('--file-exclusions',
'--sources-exclusions', default=[],
default=[], help='A list of Java files which should be ignored '
help='A list of Java files which should be ignored ' 'by the parser.')
'by the parser.')
arg_parser.add_argument( arg_parser.add_argument(
'--namespace', '--namespace',
default='', default='',
help='Namespace to wrap the registration functions ' help='Native namespace to wrap the registration functions '
'into.') 'into.')
# TODO(crbug.com/898261) hook these flags up to the build config to enable # TODO(crbug.com/898261) hook these flags up to the build config to enable
# mocking in instrumentation tests # mocking in instrumentation tests
arg_parser.add_argument( arg_parser.add_argument(
'--enable_proxy_mocks', '--enable-proxy-mocks',
default=False, default=False,
action='store_true', action='store_true',
help='Allows proxy native impls to be mocked through Java.') help='Allows proxy native impls to be mocked through Java.')
arg_parser.add_argument( arg_parser.add_argument(
'--require_mocks', '--require-mocks',
default=False, default=False,
action='store_true', action='store_true',
help='Requires all used native implementations to have a mock set when ' help='Requires all used native implementations to have a mock set when '
'called. Otherwise an exception will be thrown.') 'called. Otherwise an exception will be thrown.')
arg_parser.add_argument( arg_parser.add_argument(
'--use_proxy_hash', '--use-proxy-hash',
action='store_true', action='store_true',
help='Enables hashing of the native declaration for methods in ' help='Enables hashing of the native declaration for methods in '
'an @JniNatives interface') 'an @JniNatives interface')
arg_parser.add_argument( arg_parser.add_argument(
'--enable_jni_multiplexing', '--enable-jni-multiplexing',
action='store_true', action='store_true',
help='Enables JNI multiplexing for Java native methods') help='Enables JNI multiplexing for Java native methods')
arg_parser.add_argument( arg_parser.add_argument(
'--manual_jni_registration', '--manual-jni-registration',
action='store_true', action='store_true',
help='Manually do JNI registration - required for crazy linker') help='Manually do JNI registration - required for crazy linker')
arg_parser.add_argument('--include_test_only', arg_parser.add_argument('--include-test-only',
action='store_true', action='store_true',
help='Whether to maintain ForTesting JNI methods.') help='Whether to maintain ForTesting JNI methods.')
args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:])) args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))
if not args.enable_proxy_mocks and args.require_mocks: if not args.enable_proxy_mocks and args.require_mocks:
arg_parser.error( arg_parser.error(
'Invalid arguments: --require_mocks without --enable_proxy_mocks. ' 'Invalid arguments: --require-mocks without --enable-proxy-mocks. '
'Cannot require mocks if they are not enabled.') 'Cannot require mocks if they are not enabled.')
if not args.header_path and args.manual_jni_registration: if not args.header_path and args.manual_jni_registration:
arg_parser.error( arg_parser.error(
'Invalid arguments: --manual_jni_registration without --header-path. ' 'Invalid arguments: --manual-jni-registration without --header-path. '
'Cannot manually register JNI if there is no output header file.') 'Cannot manually register JNI if there is no output header file.')
sources_files = sorted(set(build_utils.ParseGnList(args.sources_files))) sources_files = sorted(set(build_utils.ParseGnList(args.sources_files)))
proxy_opts = ProxyOptions(
use_hash=args.use_proxy_hash,
enable_jni_multiplexing=args.enable_jni_multiplexing,
manual_jni_registration=args.manual_jni_registration,
require_mocks=args.require_mocks,
enable_mocks=args.enable_proxy_mocks)
java_file_paths = [] java_file_paths = []
for f in sources_files: for f in sources_files:
@ -953,13 +916,8 @@ def main(argv):
# skip Kotlin files as they are not supported by JNI generation. # skip Kotlin files as they are not supported by JNI generation.
java_file_paths.extend( java_file_paths.extend(
p for p in build_utils.ReadSourcesList(f) if p.startswith('..') p for p in build_utils.ReadSourcesList(f) if p.startswith('..')
and p not in args.sources_exclusions and not p.endswith('.kt')) and p not in args.file_exclusions and not p.endswith('.kt'))
_Generate(java_file_paths, _Generate(args, java_file_paths)
args.srcjar_path,
proxy_opts=proxy_opts,
header_path=args.header_path,
namespace=args.namespace,
include_test_only=args.include_test_only)
if args.depfile: if args.depfile:
build_utils.WriteDepfile(args.depfile, args.srcjar_path, build_utils.WriteDepfile(args.depfile, args.srcjar_path,

View File

@ -3,7 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/android/jni_generator/sample_jni_apk__final_jni_generated.h" #include "base/android/jni_generator/jni_registration_generated.h"
#include "base/android/jni_utils.h" #include "base/android/jni_utils.h"
// This is called by the VM when the shared library is first loaded. // This is called by the VM when the shared library is first loaded.

View File

@ -0,0 +1,105 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/meminfo_dump_provider.h"
#include <jni.h>
#include "base/android/jni_android.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/base_jni_headers/MemoryInfoBridge_jni.h"
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base::android {
// Registers this provider with the global MemoryDumpManager under
// |kDumpProviderName| so that OnMemoryDump() is invoked for memory dumps.
// Compiles to a no-op when base tracing is disabled.
MeminfoDumpProvider::MeminfoDumpProvider() {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // nullptr task runner — presumably dumps may be requested on any thread;
  // confirm against MemoryDumpManager::RegisterDumpProvider() documentation.
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, kDumpProviderName, nullptr);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
// Creates the leaky singleton on first call — which registers the provider
// via the constructor — and returns the same instance on every later call.
MeminfoDumpProvider& MeminfoDumpProvider::Initialize() {
  static base::NoDestructor<MeminfoDumpProvider> instance;
  return *instance.get();
}
// Reports process memory metrics as seen by the Android framework
// (ActivityManager#getProcessMemoryInfo(), reached through the Java
// MemoryInfoBridge) into |pmd|. Returns false — which lets the manager drop
// this provider from subsequent dumps — when base tracing is compiled out or
// the Java side returned null.
bool MeminfoDumpProvider::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // This is best-effort, and will be wrong if there are other callers of
  // ActivityManager#getProcessMemoryInfo(), either in this process or from
  // another process which is allowed to do so (typically, adb).
  //
  // However, since the framework doesn't document throttling in any non-vague
  // terms and the results are not timestamped, this is the best we can do. The
  // delay and the rest of the assumptions here come from
  // https://android.googlesource.com/platform/frameworks/base/+/refs/heads/android13-dev/services/core/java/com/android/server/am/ActivityManagerService.java#4093.
  //
  // We could always report the value on pre-Q devices, but that would skew
  // reported data. Also, some OEMs may have cherry-picked the Q change, meaning
  // that it's safer and more accurate to not report likely-stale data on all
  // Android releases.
  base::TimeTicks now = base::TimeTicks::Now();
  // Readings taken less than 5 minutes after the previous one are assumed to
  // be throttled (stale) copies of that previous reading.
  bool stale_data = (now - last_collection_time_) < base::Minutes(5);

  // Background data dumps (as in the BACKGROUND level of detail, not the
  // application being in background) should not include stale data, since it
  // would confuse data in UMA. In particular, the background/foreground session
  // filter would no longer be accurate.
  if (stale_data && args.level_of_detail !=
                        base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
    return true;
  }

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(kDumpName);
  // Data is either expected to be fresh, or this is a manually requested dump,
  // and we should still report data, but note that it is stale.
  dump->AddScalar(kIsStaleName, "bool", stale_data);
  last_collection_time_ = now;

  JNIEnv* env = AttachCurrentThread();
  ScopedJavaLocalRef<jobject> memory_info =
      Java_MemoryInfoBridge_getActivityManagerMemoryInfoForSelf(env);

  // Tell the manager that collection failed. Since this is likely not a
  // transient failure, don't return an empty dump, and let the manager exclude
  // this provider from the next dump.
  if (memory_info.is_null()) {
    LOG(WARNING) << "Got a null value";
    return false;
  }

  // Read the "otherPrivateDirty" and "otherPss" int fields off the returned
  // Debug.MemoryInfo object via raw JNI. NOTE(review): the GetFieldID()
  // results are assumed to be non-null here — verify the field names against
  // the platform's Debug.MemoryInfo definition.
  ScopedJavaLocalRef<jclass> clazz{env, env->GetObjectClass(memory_info.obj())};
  jfieldID other_private_dirty_id =
      env->GetFieldID(clazz.obj(), "otherPrivateDirty", "I");
  jfieldID other_pss_id = env->GetFieldID(clazz.obj(), "otherPss", "I");
  int other_private_dirty_kb =
      env->GetIntField(memory_info.obj(), other_private_dirty_id);
  int other_pss_kb = env->GetIntField(memory_info.obj(), other_pss_id);

  // What "other" covers is not documented in Debug#MemoryInfo, nor in
  // ActivityManager#getProcessMemoryInfo. However, it calls
  // Debug#getMemoryInfo(), which ends up summing all the heaps in the range
  // [HEAP_DALVIK_OTHER, HEAP_OTHER_MEMTRACK]. See the definitions in
  // https://android.googlesource.com/platform/frameworks/base/+/0b7c1774ba42daef7c80bf2f00fe1c0327e756ae/core/jni/android_os_Debug.cpp#60,
  // and the code in android_os_Debug_getDirtyPagesPid() in the same file.
  //
  // The framework reports these values in kiB; convert to bytes.
  dump->AddScalar(kPrivateDirtyMetricName, "bytes",
                  static_cast<uint64_t>(other_private_dirty_kb) * 1024);
  dump->AddScalar(kPssMetricName, "bytes",
                  static_cast<uint64_t>(other_pss_kb) * 1024);
  return true;
#else   // BUILDFLAG(ENABLE_BASE_TRACING)
  return false;
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace base::android

View File

@ -0,0 +1,38 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#define BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#include "base/base_export.h"
#include "base/no_destructor.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
namespace base::android {
// Memory dump provider that reports this process's memory usage as seen by
// the Android framework (ActivityManager#getProcessMemoryInfo()) — numbers
// that are not observable from inside the process itself.
class BASE_EXPORT MeminfoDumpProvider
    : public base::trace_event::MemoryDumpProvider {
 public:
  // Creates and registers the singleton on first call, and returns it. The
  // returned reference is primarily useful in tests.
  static MeminfoDumpProvider& Initialize();

  // base::trace_event::MemoryDumpProvider:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;

  // Name under which this provider registers with MemoryDumpManager.
  static constexpr char kDumpProviderName[] = "android_meminfo";
  // Allocator dump and scalar names emitted by OnMemoryDump().
  static constexpr char kDumpName[] = "meminfo";
  static constexpr char kIsStaleName[] = "is_stale";
  static constexpr char kPssMetricName[] = "other_pss";
  static constexpr char kPrivateDirtyMetricName[] = "other_private_dirty";

 private:
  friend class base::NoDestructor<MeminfoDumpProvider>;

  // Private: instances are only created through Initialize().
  MeminfoDumpProvider();

  // Time of the last reported collection; readings taken within 5 minutes of
  // it are flagged as stale by OnMemoryDump().
  base::TimeTicks last_collection_time_;
};
} // namespace base::android
#endif // BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_

View File

@ -4,6 +4,7 @@
#include "base/android/callback_android.h" #include "base/android/callback_android.h"
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h" #include "base/android/jni_string.h"
#include "base/base_jni_headers/NativeUmaRecorder_jni.h" #include "base/base_jni_headers/NativeUmaRecorder_jni.h"
#include "base/format_macros.h" #include "base/format_macros.h"
@ -264,6 +265,36 @@ jint JNI_NativeUmaRecorder_GetHistogramTotalCountForTesting(
return actual_count; return actual_count;
} }
// Flattens every bucket of the named histogram into a Java long[] laid out as
// consecutive (min, max, count) triples. An empty array is returned when no
// samples have been recorded under |histogram_name|.
ScopedJavaLocalRef<jlongArray>
JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(
    JNIEnv* env,
    const JavaParamRef<jstring>& histogram_name) {
  std::string name = android::ConvertJavaStringToUTF8(env, histogram_name);
  std::vector<int64_t> triples;

  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
  if (!histogram) {
    // No samples have been recorded for this histogram.
    return base::android::ToJavaLongArray(env, triples);
  }

  std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
  for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
    HistogramBase::Sample bucket_min;
    int64_t bucket_max;
    HistogramBase::Count bucket_count;
    it->Get(&bucket_min, &bucket_max, &bucket_count);
    triples.push_back(bucket_min);
    triples.push_back(bucket_max);
    triples.push_back(bucket_count);
  }
  return base::android::ToJavaLongArray(env, triples);
}
jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) { jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) {
HistogramsSnapshot* snapshot = new HistogramsSnapshot(); HistogramsSnapshot* snapshot = new HistogramsSnapshot();
for (const auto* const histogram : StatisticsRecorder::GetHistograms()) { for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {

View File

@ -240,7 +240,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This can get very large as it constructs the whole data structure in // This can get very large as it constructs the whole data structure in
// memory before dumping it to the file. // memory before dumping it to the file.
Value root(Value::Type::DICTIONARY); Value root(Value::Type::DICT);
uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed); uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed);
root.SetStringKey("total_calls_count", root.SetStringKey("total_calls_count",
base::StringPrintf("%" PRIu32, total_calls_count)); base::StringPrintf("%" PRIu32, total_calls_count));
@ -252,7 +252,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This callee was never called. // This callee was never called.
continue; continue;
Value callee_element(Value::Type::DICTIONARY); Value callee_element(Value::Type::DICT);
uint32_t callee_offset = i * 4; uint32_t callee_offset = i * 4;
callee_element.SetStringKey("index", callee_element.SetStringKey("index",
base::StringPrintf("%" PRIuS, caller_index)); base::StringPrintf("%" PRIuS, caller_index));
@ -278,7 +278,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// No misses. // No misses.
continue; continue;
Value caller_count(Value::Type::DICTIONARY); Value caller_count(Value::Type::DICT);
caller_count.SetStringKey("caller_offset", caller_count.SetStringKey("caller_offset",
base::StringPrintf("%" PRIu32, caller_offset)); base::StringPrintf("%" PRIu32, caller_offset));
caller_count.SetStringKey("count", base::StringPrintf("%" PRIu32, count)); caller_count.SetStringKey("count", base::StringPrintf("%" PRIu32, count));

View File

@ -2,6 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
// IMPORTANT NOTE: deprecated. Use std::atomic instead.
//
// Rationale:
// - Uniformity: most of the code uses std::atomic, and the underlying
// implementation is the same. Use the STL one.
// - Clearer code: return values from some operations (e.g. CompareAndSwap)
// differ from the equivalent ones in std::atomic, leading to confusion.
// - Richer semantics: can use actual types, rather than e.g. Atomic32 for a
// boolean flag, or AtomicWord for T*. Bitwise operations (e.g. fetch_or())
// are only in std::atomic.
// - Harder to misuse: base::subtle::Atomic32 is just an int, making it possible
// to accidentally manipulate, not realizing that there are no atomic
// semantics attached to it. For instance, "Atomic32 a; a++;" is almost
// certainly incorrect.
// For atomic operations on reference counts, see atomic_refcount.h. // For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h. // For atomic operations on sequence numbers, see atomic_sequence_num.h.

View File

@ -137,9 +137,8 @@ class BASE_EXPORT BigEndianWriter {
template<typename T> template<typename T>
bool Write(T v); bool Write(T v);
// TODO(crbug.com/1298696): Breaks net_unittests. raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> ptr_;
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> ptr_; raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> end_;
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> end_;
}; };
} // namespace base } // namespace base

View File

@ -180,10 +180,24 @@ std::ostream& CheckError::stream() {
} }
CheckError::~CheckError() { CheckError::~CheckError() {
// TODO(crbug.com/1409729): Consider splitting out CHECK from DCHECK so that
// the destructor can be marked [[noreturn]] and we don't need to check
// severity in the destructor.
const bool is_fatal = log_message_->severity() == LOGGING_FATAL;
// Note: This function ends up in crash stack traces. If its full name // Note: This function ends up in crash stack traces. If its full name
// changes, the crash server's magic signature logic needs to be updated. // changes, the crash server's magic signature logic needs to be updated.
// See cl/306632920. // See cl/306632920.
delete log_message_; delete log_message_;
// Make sure we crash even if LOG(FATAL) has been overridden.
// TODO(crbug.com/1409729): Include Windows here too. This is done in steps to
// prevent backsliding on platforms where this goes through CQ.
// Windows is blocked by:
// * All/RenderProcessHostWriteableFileDeathTest.
// PassUnsafeWriteableExecutableFile/2
if (is_fatal && !BUILDFLAG(IS_WIN)) {
base::ImmediateCrash();
}
} }
NotReachedError NotReachedError::NotReached(const char* file, int line) { NotReachedError NotReachedError::NotReached(const char* file, int line) {
@ -198,13 +212,47 @@ NotReachedError NotReachedError::NotReached(const char* file, int line) {
} }
void NotReachedError::TriggerNotReached() { void NotReachedError::TriggerNotReached() {
// TODO(pbos): Add back NotReachedError("", -1) here asap. This was removed to // This triggers a NOTREACHED() error as the returned NotReachedError goes out
// disable NOTREACHED() reports temporarily for M111 and should be added // of scope.
// back once this change has merged to M111. NotReached("", -1);
} }
NotReachedError::~NotReachedError() = default; NotReachedError::~NotReachedError() = default;
// Builds the fatal LogMessage eagerly — through an immediately-invoked
// lambda — so the CheckError base is handed a fully-constructed message
// already prefixed with "NOTREACHED hit. ".
NotReachedNoreturnError::NotReachedNoreturnError(const char* file, int line)
    : CheckError([file, line]() {
        auto* const log_message = new LogMessage(file, line, LOGGING_FATAL);
        log_message->stream() << "NOTREACHED hit. ";
        return log_message;
      }()) {}
// Note: This function ends up in crash stack traces. If its full name changes,
// the crash server's magic signature logic needs to be updated. See
// cl/306632920.
NotReachedNoreturnError::~NotReachedNoreturnError() {
  // Deleting the message emits it (presumably from LogMessage's destructor —
  // which, being LOGGING_FATAL, may itself terminate the process).
  delete log_message_;
  // Make sure we die if we haven't. LOG(FATAL) is not yet [[noreturn]] as of
  // writing this.
  base::ImmediateCrash();
}
// Builds the log message for a failed CHECK_op/DCHECK_op comparison, e.g.
// "Check failed: a == b (1 vs. 2)". |is_dcheck| selects DCHECK vs CHECK
// severity. Takes ownership of the malloc'ed |v1_str|/|v2_str| buffers and
// frees them; the caller owns the returned LogMessage.
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
                                            const char* file,
                                            int line,
                                            const char* expr_str,
                                            char* v1_str,
                                            char* v2_str) {
  LogMessage* message = nullptr;
  if (is_dcheck) {
    message = new DCheckLogMessage(file, line, LOGGING_DCHECK);
  } else {
    message = new LogMessage(file, line, LOGGING_FATAL);
  }
  message->stream() << "Check failed: " << expr_str << " (" << v1_str
                    << " vs. " << v2_str << ")";
  free(v1_str);
  free(v2_str);
  return message;
}
void RawCheck(const char* message) { void RawCheck(const char* message) {
RawLog(LOGGING_FATAL, message); RawLog(LOGGING_FATAL, message);
} }

View File

@ -93,7 +93,7 @@ class BASE_EXPORT CheckError {
return stream() << streamed_type; return stream() << streamed_type;
} }
private: protected:
LogMessage* const log_message_; LogMessage* const log_message_;
}; };
@ -113,6 +113,15 @@ class BASE_EXPORT NotReachedError : public CheckError {
using CheckError::CheckError; using CheckError::CheckError;
}; };
// TODO(crbug.com/851128): This should take the name of the above class once all
// callers of NOTREACHED() have migrated to the CHECK-fatal version.
class BASE_EXPORT NotReachedNoreturnError : public CheckError {
public:
NotReachedNoreturnError(const char* file, int line);
[[noreturn]] NOMERGE NOINLINE NOT_TAIL_CALLED ~NotReachedNoreturnError();
};
// The 'switch' is used to prevent the 'else' from being ambiguous when the // The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as: // macro is used in an 'if' clause such as:
// if (a == 1) // if (a == 1)

View File

@ -76,19 +76,4 @@ char* StreamValToStr(const void* v,
return strdup(ss.str().c_str()); return strdup(ss.str().c_str());
} }
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
const char* file,
int line,
const char* expr_str,
char* v1_str,
char* v2_str) {
LogMessage* const log_message =
new LogMessage(file, line, is_dcheck ? LOGGING_DCHECK : LOGGING_FATAL);
log_message->stream() << "Check failed: " << expr_str << " (" << v1_str
<< " vs. " << v2_str << ")";
free(v1_str);
free(v2_str);
return log_message;
}
} // namespace logging } // namespace logging

View File

@ -64,7 +64,7 @@
// folding of multiple identical caller functions into a single signature. To // folding of multiple identical caller functions into a single signature. To
// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h. // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
// Use like: // Use like:
// void NOT_TAIL_CALLED FooBar(); // NOT_TAIL_CALLED void FooBar();
#if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called) #if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)
#define NOT_TAIL_CALLED __attribute__((not_tail_called)) #define NOT_TAIL_CALLED __attribute__((not_tail_called))
#else #else

View File

@ -253,6 +253,15 @@ class EnumSet {
// Removes all values from our set. // Removes all values from our set.
void Clear() { enums_.reset(); } void Clear() { enums_.reset(); }
// Makes `value`'s membership match `should_be_present`: inserts the value
// when true, removes it when false.
void PutOrRemove(E value, bool should_be_present) {
  if (!should_be_present) {
    Remove(value);
    return;
  }
  Put(value);
}
// Returns true iff the given value is in range and a member of our set. // Returns true iff the given value is in range and a member of our set.
constexpr bool Has(E value) const { constexpr bool Has(E value) const {
return InRange(value) && enums_[ToIndex(value)]; return InRange(value) && enums_[ToIndex(value)];

View File

@ -250,7 +250,7 @@ class small_map {
inline explicit iterator(const typename NormalMap::iterator& init) inline explicit iterator(const typename NormalMap::iterator& init)
: array_iter_(nullptr), map_iter_(init) {} : array_iter_(nullptr), map_iter_(init) {}
raw_ptr<value_type> array_iter_; raw_ptr<value_type, AllowPtrArithmetic> array_iter_;
typename NormalMap::iterator map_iter_; typename NormalMap::iterator map_iter_;
}; };
@ -327,7 +327,7 @@ class small_map {
const typename NormalMap::const_iterator& init) const typename NormalMap::const_iterator& init)
: array_iter_(nullptr), map_iter_(init) {} : array_iter_(nullptr), map_iter_(init) {}
raw_ptr<const value_type> array_iter_; raw_ptr<const value_type, AllowPtrArithmetic> array_iter_;
typename NormalMap::const_iterator map_iter_; typename NormalMap::const_iterator map_iter_;
}; };

View File

@ -18,6 +18,7 @@
#include "base/containers/checked_iterators.h" #include "base/containers/checked_iterators.h"
#include "base/containers/contiguous_iterator.h" #include "base/containers/contiguous_iterator.h"
#include "base/cxx20_to_address.h" #include "base/cxx20_to_address.h"
#include "base/numerics/safe_math.h"
namespace base { namespace base {
@ -256,16 +257,16 @@ class GSL_POINTER span : public internal::ExtentStorage<Extent> {
template <typename It, template <typename It,
typename = internal::EnableIfCompatibleContiguousIterator<It, T>> typename = internal::EnableIfCompatibleContiguousIterator<It, T>>
constexpr span(It first, size_t count) noexcept constexpr span(It first, StrictNumeric<size_t> count) noexcept
: ExtentStorage(count), : ExtentStorage(count),
// The use of to_address() here is to handle the case where the iterator // The use of to_address() here is to handle the case where the iterator
// `first` is pointing to the container's `end()`. In that case we can // `first` is pointing to the container's `end()`. In that case we can
// not use the address returned from the iterator, or dereference it // not use the address returned from the iterator, or dereference it
// through the iterator's `operator*`, but we can store it. We must assume // through the iterator's `operator*`, but we can store it. We must
// in this case that `count` is 0, since the iterator does not point to // assume in this case that `count` is 0, since the iterator does not
// valid data. Future hardening of iterators may disallow pulling the // point to valid data. Future hardening of iterators may disallow
// address from `end()`, as demonstrated by asserts() in libstdc++: // pulling the address from `end()`, as demonstrated by asserts() in
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960. // libstdc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960.
// //
// The span API dictates that the `data()` is accessible when size is 0, // The span API dictates that the `data()` is accessible when size is 0,
// since the pointer may be valid, so we cannot prevent storing and // since the pointer may be valid, so we cannot prevent storing and
@ -473,7 +474,7 @@ as_writable_bytes(span<T, X> s) noexcept {
// Type-deducing helpers for constructing a span. // Type-deducing helpers for constructing a span.
template <int&... ExplicitArgumentBarrier, typename It> template <int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, size_t size) noexcept { constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>; using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T>(it, size); return span<T>(it, size);
} }
@ -508,7 +509,7 @@ constexpr auto make_span(Container&& container) noexcept {
// //
// Usage: auto static_span = base::make_span<N>(...); // Usage: auto static_span = base::make_span<N>(...);
template <size_t N, int&... ExplicitArgumentBarrier, typename It> template <size_t N, int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, size_t size) noexcept { constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>; using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T, N>(it, size); return span<T, N>(it, size);
} }

View File

@ -5,27 +5,13 @@
#ifndef BASE_CXX17_BACKPORTS_H_ #ifndef BASE_CXX17_BACKPORTS_H_
#define BASE_CXX17_BACKPORTS_H_ #define BASE_CXX17_BACKPORTS_H_
#include <functional> #include <algorithm>
#include "base/check.h"
namespace base { namespace base {
// C++14 implementation of C++17's std::clamp(): // TODO(crbug.com/1373621): Rewrite all uses of base::clamp as std::clamp and
// https://en.cppreference.com/w/cpp/algorithm/clamp // remove this file.
// Please note that the C++ spec makes it undefined behavior to call std::clamp using std::clamp;
// with a value of `lo` that compares greater than the value of `hi`. This
// implementation uses a CHECK to enforce this as a hard restriction.
template <typename T, typename Compare>
constexpr const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) {
CHECK(!comp(hi, lo));
return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
}
template <typename T>
constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
return base::clamp(v, lo, hi, std::less<T>{});
}
} // namespace base } // namespace base

View File

@ -1,5 +1,2 @@
# For activity tracking:
per-file activity_*=bcwhite@chromium.org
# For ASan integration: # For ASan integration:
per-file asan_service*=file://base/memory/MIRACLE_PTR_OWNERS per-file asan_service*=file://base/memory/MIRACLE_PTR_OWNERS

View File

@ -1,407 +0,0 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/debug/activity_analyzer.h"
#include <utility>
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
namespace base {
namespace debug {
namespace {
const ActivityUserData::Snapshot& GetEmptyUserDataSnapshot() {
// An empty snapshot that can be returned when there otherwise is none.
static const NoDestructor<ActivityUserData::Snapshot> empty_snapshot;
return *empty_snapshot;
}
// DO NOT CHANGE VALUES. This is logged persistently in a histogram.
enum AnalyzerCreationError {
kInvalidMemoryMappedFile,
kPmaBadFile,
kPmaUninitialized,
kPmaDeleted,
kPmaCorrupt,
kAnalyzerCreationErrorMax // Keep this last.
};
void LogAnalyzerCreationError(AnalyzerCreationError error) {
UmaHistogramEnumeration("ActivityTracker.Collect.AnalyzerCreationError",
error, kAnalyzerCreationErrorMax);
}
} // namespace
ThreadActivityAnalyzer::Snapshot::Snapshot() = default;
ThreadActivityAnalyzer::Snapshot::~Snapshot() = default;
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
const ThreadActivityTracker& tracker)
: activity_snapshot_valid_(tracker.CreateSnapshot(&activity_snapshot_)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
: ThreadActivityAnalyzer(ThreadActivityTracker(base, size)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference)
: ThreadActivityAnalyzer(allocator->GetAsArray<char>(
reference,
GlobalActivityTracker::kTypeIdActivityTracker,
PersistentMemoryAllocator::kSizeAny),
allocator->GetAllocSize(reference)) {}
ThreadActivityAnalyzer::~ThreadActivityAnalyzer() = default;
void ThreadActivityAnalyzer::AddGlobalInformation(
GlobalActivityAnalyzer* global) {
if (!IsValid())
return;
// User-data is held at the global scope even though it's referenced at the
// thread scope.
activity_snapshot_.user_data_stack.clear();
for (auto& activity : activity_snapshot_.activity_stack) {
// The global GetUserDataSnapshot will return an empty snapshot if the ref
// or id is not valid.
activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
activity_snapshot_.process_id, activity.user_data_ref,
activity.user_data_id));
}
}
GlobalActivityAnalyzer::GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator)
: allocator_(std::move(allocator)),
analysis_stamp_(0LL),
allocator_iterator_(allocator_.get()) {
DCHECK(allocator_);
}
GlobalActivityAnalyzer::~GlobalActivityAnalyzer() = default;
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator) {
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
LogAnalyzerCreationError(kPmaUninitialized);
return nullptr;
}
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_DELETED) {
LogAnalyzerCreationError(kPmaDeleted);
return nullptr;
}
if (allocator->IsCorrupt()) {
LogAnalyzerCreationError(kPmaCorrupt);
return nullptr;
}
return std::make_unique<GlobalActivityAnalyzer>(std::move(allocator));
}
#if !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
const FilePath& file_path) {
// Map the file read-write so it can guarantee consistency between
// the analyzer and any trackers that my still be active.
std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
if (!mmfile->Initialize(file_path, MemoryMappedFile::READ_WRITE)) {
LogAnalyzerCreationError(kInvalidMemoryMappedFile);
return nullptr;
}
if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
LogAnalyzerCreationError(kPmaBadFile);
return nullptr;
}
return CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
std::move(mmfile), 0, 0, StringPiece(), /*readonly=*/true));
}
#endif // !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping) {
if (!mapping.IsValid() ||
!ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
mapping)) {
return nullptr;
}
return CreateWithAllocator(
std::make_unique<ReadOnlySharedPersistentMemoryAllocator>(
std::move(mapping), 0, StringPiece()));
}
ProcessId GlobalActivityAnalyzer::GetFirstProcess() {
PrepareAllAnalyzers();
return GetNextProcess();
}
ProcessId GlobalActivityAnalyzer::GetNextProcess() {
if (process_ids_.empty())
return 0;
ProcessId pid = process_ids_.back();
process_ids_.pop_back();
return pid;
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer(
ProcessId pid) {
analyzers_iterator_ = analyzers_.begin();
analyzers_iterator_pid_ = pid;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
int64_t create_stamp;
if (analyzers_iterator_->second->GetProcessId(&create_stamp) == pid &&
create_stamp <= analysis_stamp_) {
return analyzers_iterator_->second.get();
}
return GetNextAnalyzer();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetNextAnalyzer() {
DCHECK(analyzers_iterator_ != analyzers_.end());
int64_t create_stamp;
do {
++analyzers_iterator_;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
} while (analyzers_iterator_->second->GetProcessId(&create_stamp) !=
analyzers_iterator_pid_ ||
create_stamp > analysis_stamp_);
return analyzers_iterator_->second.get();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
const ThreadKey& key) {
auto found = analyzers_.find(key);
if (found == analyzers_.end())
return nullptr;
return found->second.get();
}
ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
ProcessId pid,
uint32_t ref,
uint32_t id) {
ActivityUserData::Snapshot snapshot;
void* memory = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdUserDataRecord,
PersistentMemoryAllocator::kSizeAny);
if (memory) {
size_t size = allocator_->GetAllocSize(ref);
const ActivityUserData user_data(memory, size);
user_data.CreateSnapshot(&snapshot);
ProcessId process_id;
int64_t create_stamp;
if (!ActivityUserData::GetOwningProcessId(memory, &process_id,
&create_stamp) ||
process_id != pid || user_data.id() != id) {
// This allocation has been overwritten since it was created. Return an
// empty snapshot because whatever was captured is incorrect.
snapshot.clear();
}
}
return snapshot;
}
const ActivityUserData::Snapshot&
GlobalActivityAnalyzer::GetProcessDataSnapshot(ProcessId pid) {
auto iter = process_data_.find(pid);
if (iter == process_data_.end())
return GetEmptyUserDataSnapshot();
if (iter->second.create_stamp > analysis_stamp_)
return GetEmptyUserDataSnapshot();
DCHECK_EQ(pid, iter->second.process_id);
return iter->second.data;
}
std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
std::vector<std::string> messages;
PersistentMemoryAllocator::Reference ref;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
while ((ref = iter.GetNextOfType(
GlobalActivityTracker::kTypeIdGlobalLogMessage)) != 0) {
const char* message = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdGlobalLogMessage,
PersistentMemoryAllocator::kSizeAny);
if (message)
messages.push_back(message);
}
return messages;
}
std::vector<GlobalActivityTracker::ModuleInfo>
GlobalActivityAnalyzer::GetModules(ProcessId pid) {
std::vector<GlobalActivityTracker::ModuleInfo> modules;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
const GlobalActivityTracker::ModuleInfoRecord* record;
while (
(record =
iter.GetNextOfObject<GlobalActivityTracker::ModuleInfoRecord>()) !=
nullptr) {
ProcessId process_id;
int64_t create_stamp;
if (!OwningProcess::GetOwningProcessId(&record->owner, &process_id,
&create_stamp) ||
pid != process_id || create_stamp > analysis_stamp_) {
continue;
}
GlobalActivityTracker::ModuleInfo info;
if (record->DecodeTo(&info, allocator_->GetAllocSize(
allocator_->GetAsReference(record)))) {
modules.push_back(std::move(info));
}
}
return modules;
}
GlobalActivityAnalyzer::ProgramLocation
GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
// This should be implemented but it's never been a priority.
return { 0, 0 };
}
bool GlobalActivityAnalyzer::IsDataComplete() const {
DCHECK(allocator_);
return !allocator_->IsFull();
}
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
const UserDataSnapshot& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
UserDataSnapshot&& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() = default;
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Record the time when analysis started.
analysis_stamp_ = base::Time::Now().ToInternalValue();
// Fetch all the records. This will retrieve only ones created since the
// last run since the PMA iterator will continue from where it left off.
uint32_t type;
PersistentMemoryAllocator::Reference ref;
while ((ref = allocator_iterator_.GetNext(&type)) != 0) {
switch (type) {
case GlobalActivityTracker::kTypeIdActivityTracker:
case GlobalActivityTracker::kTypeIdActivityTrackerFree:
case GlobalActivityTracker::kTypeIdProcessDataRecord:
case GlobalActivityTracker::kTypeIdProcessDataRecordFree:
case PersistentMemoryAllocator::kTypeIdTransitioning:
// Active, free, or transitioning: add it to the list of references
// for later analysis.
memory_references_.insert(ref);
break;
}
}
// Clear out any old information.
analyzers_.clear();
process_data_.clear();
process_ids_.clear();
std::set<ProcessId> seen_pids;
// Go through all the known references and create objects for them with
// snapshots of the current state.
for (PersistentMemoryAllocator::Reference memory_ref : memory_references_) {
// Get the actual data segment for the tracker. Any type will do since it
// is checked below.
void* const base = allocator_->GetAsArray<char>(
memory_ref, PersistentMemoryAllocator::kTypeIdAny,
PersistentMemoryAllocator::kSizeAny);
const size_t size = allocator_->GetAllocSize(memory_ref);
if (!base)
continue;
switch (allocator_->GetType(memory_ref)) {
case GlobalActivityTracker::kTypeIdActivityTracker: {
// Create the analyzer on the data. This will capture a snapshot of the
// tracker state. This can fail if the tracker is somehow corrupted or
// is in the process of shutting down.
std::unique_ptr<ThreadActivityAnalyzer> analyzer(
new ThreadActivityAnalyzer(base, size));
if (!analyzer->IsValid())
continue;
analyzer->AddGlobalInformation(this);
// Track PIDs.
ProcessId pid = analyzer->GetProcessId();
if (seen_pids.find(pid) == seen_pids.end()) {
process_ids_.push_back(pid);
seen_pids.insert(pid);
}
// Add this analyzer to the map of known ones, indexed by a unique
// thread
// identifier.
DCHECK(!base::Contains(analyzers_, analyzer->GetThreadKey()));
analyzer->allocator_reference_ = ref;
analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
} break;
case GlobalActivityTracker::kTypeIdProcessDataRecord: {
// Get the PID associated with this data record.
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
DCHECK(!base::Contains(process_data_, process_id));
// Create a snapshot of the data. This can fail if the data is somehow
// corrupted or the process shutdown and the memory being released.
UserDataSnapshot& snapshot = process_data_[process_id];
snapshot.process_id = process_id;
snapshot.create_stamp = create_stamp;
const ActivityUserData process_data(base, size);
if (!process_data.CreateSnapshot(&snapshot.data))
break;
// Check that nothing changed. If it did, forget what was recorded.
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
if (process_id != snapshot.process_id ||
create_stamp != snapshot.create_stamp) {
process_data_.erase(process_id);
break;
}
// Track PIDs.
if (seen_pids.find(process_id) == seen_pids.end()) {
process_ids_.push_back(process_id);
seen_pids.insert(process_id);
}
} break;
}
}
// Reverse the list of PIDs so that they get popped in the order found.
ranges::reverse(process_ids_);
}
} // namespace debug
} // namespace base

View File

@ -1,260 +0,0 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_DEBUG_ACTIVITY_ANALYZER_H_
#define BASE_DEBUG_ACTIVITY_ANALYZER_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/debug/activity_tracker.h"
#include "base/memory/shared_memory_mapping.h"
#include "build/build_config.h"
namespace base {
namespace debug {
class GlobalActivityAnalyzer;
// This class provides analysis of data captured from a ThreadActivityTracker.
// When created, it takes a snapshot of the data held by the tracker and
// makes that information available to other code.
class BASE_EXPORT ThreadActivityAnalyzer {
public:
struct BASE_EXPORT Snapshot : ThreadActivityTracker::Snapshot {
Snapshot();
~Snapshot();
// The user-data snapshot for an activity, matching the |activity_stack|
// of ThreadActivityTracker::Snapshot, if any.
std::vector<ActivityUserData::Snapshot> user_data_stack;
};
// This class provides keys that uniquely identify a thread, even across
// multiple processes.
class ThreadKey {
public:
ThreadKey(ProcessId pid, int64_t tid) : pid_(pid), tid_(tid) {}
bool operator<(const ThreadKey& rhs) const {
if (pid_ != rhs.pid_)
return pid_ < rhs.pid_;
return tid_ < rhs.tid_;
}
bool operator==(const ThreadKey& rhs) const {
return (pid_ == rhs.pid_ && tid_ == rhs.tid_);
}
private:
ProcessId pid_;
int64_t tid_;
};
// Creates an analyzer for an existing activity |tracker|. A snapshot is taken
// immediately and the tracker is not referenced again.
explicit ThreadActivityAnalyzer(const ThreadActivityTracker& tracker);
// Creates an analyzer for a block of memory currently or previously in-use
// by an activity-tracker. A snapshot is taken immediately and the memory
// is not referenced again.
ThreadActivityAnalyzer(void* base, size_t size);
// Creates an analyzer for a block of memory held within a persistent-memory
// |allocator| at the given |reference|. A snapshot is taken immediately and
// the memory is not referenced again.
ThreadActivityAnalyzer(PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference);
ThreadActivityAnalyzer(const ThreadActivityAnalyzer&) = delete;
ThreadActivityAnalyzer& operator=(const ThreadActivityAnalyzer&) = delete;
~ThreadActivityAnalyzer();
// Adds information from the global analyzer.
void AddGlobalInformation(GlobalActivityAnalyzer* global);
// Returns true iff the contained data is valid. Results from all other
// methods are undefined if this returns false.
bool IsValid() { return activity_snapshot_valid_; }
// Gets the process id and its creation stamp.
ProcessId GetProcessId(int64_t* out_stamp = nullptr) {
if (out_stamp)
*out_stamp = activity_snapshot_.create_stamp;
return activity_snapshot_.process_id;
}
// Gets the name of the thread.
const std::string& GetThreadName() {
return activity_snapshot_.thread_name;
}
// Gets the TheadKey for this thread.
ThreadKey GetThreadKey() {
return ThreadKey(activity_snapshot_.process_id,
activity_snapshot_.thread_id);
}
const Snapshot& activity_snapshot() { return activity_snapshot_; }
private:
friend class GlobalActivityAnalyzer;
// The snapshot of the activity tracker taken at the moment of construction.
Snapshot activity_snapshot_;
// Flag indicating if the snapshot data is valid.
bool activity_snapshot_valid_;
// A reference into a persistent memory allocator, used by the global
// analyzer to know where this tracker came from.
PersistentMemoryAllocator::Reference allocator_reference_ = 0;
};
// This class manages analyzers for all known processes and threads as stored
// in a persistent memory allocator. It supports retrieval of them through
// iteration and directly using a ThreadKey, which allows for cross-references
// to be resolved.
// Note that though atomic snapshots are used and everything has its snapshot
// taken at the same time, the multi-snapshot itself is not atomic and thus may
// show small inconsistencies between threads if attempted on a live system.
class BASE_EXPORT GlobalActivityAnalyzer {
public:
struct ProgramLocation {
int module;
uintptr_t offset;
};
using ThreadKey = ThreadActivityAnalyzer::ThreadKey;
// Creates a global analyzer from a persistent memory allocator.
explicit GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator);
GlobalActivityAnalyzer(const GlobalActivityAnalyzer&) = delete;
GlobalActivityAnalyzer& operator=(const GlobalActivityAnalyzer&) = delete;
~GlobalActivityAnalyzer();
// Creates a global analyzer using a given persistent-memory |allocator|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator);
#if !BUILDFLAG(IS_NACL)
// Creates a global analyzer using the contents of a file given in
// |file_path|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithFile(
const FilePath& file_path);
#endif // !BUILDFLAG(IS_NACL)
// Like above but accesses an allocator in a mapped shared-memory segment.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping);
// Iterates over all known valid processes and returns their PIDs or zero
// if there are no more. Calls to GetFirstProcess() will perform a global
// snapshot in order to provide a relatively consistent state across the
// future calls to GetNextProcess() and GetFirst/NextAnalyzer(). PIDs are
// returned in the order they're found meaning that a first-launched
// controlling process will be found first. Note, however, that space
// freed by an exiting process may be re-used by a later process.
ProcessId GetFirstProcess();
ProcessId GetNextProcess();
// Iterates over all known valid analyzers for the a given process or returns
// null if there are no more.
//
// GetFirstProcess() must be called first in order to capture a global
// snapshot! Ownership stays with the global analyzer object and all existing
// analyzer pointers are invalidated when GetFirstProcess() is called.
ThreadActivityAnalyzer* GetFirstAnalyzer(ProcessId pid);
ThreadActivityAnalyzer* GetNextAnalyzer();
// Gets the analyzer for a specific thread or null if there is none.
// Ownership stays with the global analyzer object.
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
// Extract user data based on a reference and its identifier.
ActivityUserData::Snapshot GetUserDataSnapshot(ProcessId pid,
uint32_t ref,
uint32_t id);
// Extract the data for a specific process. An empty snapshot will be
// returned if the process is not known.
const ActivityUserData::Snapshot& GetProcessDataSnapshot(ProcessId pid);
// Gets all log messages stored within.
std::vector<std::string> GetLogMessages();
// Gets modules corresponding to a pid. This pid must come from a call to
// GetFirst/NextProcess. Only modules that were first registered prior to
// GetFirstProcess's snapshot are returned.
std::vector<GlobalActivityTracker::ModuleInfo> GetModules(ProcessId pid);
// Gets the corresponding "program location" for a given "program counter".
// This will return {0,0} if no mapping could be found.
ProgramLocation GetProgramLocationFromAddress(uint64_t address);
// Returns whether the data is complete. Data can be incomplete if the
// recording size quota is hit.
bool IsDataComplete() const;
private:
using AnalyzerMap =
std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
struct UserDataSnapshot {
// Complex class needs out-of-line ctor/dtor.
UserDataSnapshot();
UserDataSnapshot(const UserDataSnapshot& rhs);
UserDataSnapshot(UserDataSnapshot&& rhs);
~UserDataSnapshot();
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::Snapshot data;
};
// Finds, creates, and indexes analyzers for all known processes and threads.
void PrepareAllAnalyzers();
// The persistent memory allocator holding all tracking data.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
// The time stamp when analysis began. This is used to prevent looking into
// process IDs that get reused when analyzing a live system.
int64_t analysis_stamp_;
// The iterator for finding tracking information in the allocator.
PersistentMemoryAllocator::Iterator allocator_iterator_;
// A set of all interesting memory references found within the allocator.
std::set<PersistentMemoryAllocator::Reference> memory_references_;
// A set of all process-data memory references found within the allocator.
std::map<ProcessId, UserDataSnapshot> process_data_;
// A set of all process IDs collected during PrepareAllAnalyzers. These are
// popped and returned one-by-one with calls to GetFirst/NextProcess().
std::vector<ProcessId> process_ids_;
// A map, keyed by ThreadKey, of all valid activity analyzers.
AnalyzerMap analyzers_;
// The iterator within the analyzers_ map for returning analyzers through
// first/next iteration.
AnalyzerMap::iterator analyzers_iterator_;
ProcessId analyzers_iterator_pid_;
};
} // namespace debug
} // namespace base
#endif // BASE_DEBUG_ACTIVITY_ANALYZER_H_

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,7 @@
#include <string.h> #include <string.h>
#include <sys/param.h> #include <sys/param.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h> #include <sys/types.h>
#include <unistd.h> #include <unistd.h>
@ -300,6 +301,27 @@ void PrintToStderr(const char* output) {
std::ignore = HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))); std::ignore = HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output)));
} }
#if BUILDFLAG(IS_LINUX)
void AlarmSignalHandler(int signal, siginfo_t* info, void* void_context) {
// We have seen rare cases on AMD linux where the default signal handler
// either does not run or a thread (Probably an AMD driver thread) prevents
// the termination of the gpu process. We catch this case when the alarm fires
// and then call exit_group() to kill all threads of the process. This has
// resolved the zombie gpu process issues we have seen on our context lost
// test.
// Note that many different calls were tried to kill the process when it is in
// this state. Only 'exit_group' was found to cause termination and it is
// speculated that only this works because only this exit kills all threads in
// the process (not simply the current thread).
// See: http://crbug.com/1396451.
PrintToStderr(
"Warning: Default signal handler failed to terminate process.\n");
PrintToStderr("Calling exit_group() directly to prevent timeout.\n");
// See: https://man7.org/linux/man-pages/man2/exit_group.2.html
syscall(SYS_exit_group, EXIT_FAILURE);
}
#endif // BUILDFLAG(IS_LINUX)
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) { void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
// NOTE: This code MUST be async-signal safe. // NOTE: This code MUST be async-signal safe.
// NO malloc or stdio is allowed here. // NO malloc or stdio is allowed here.
@ -520,11 +542,27 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
PrintToStderr( PrintToStderr(
"Calling _exit(EXIT_FAILURE). Core file will not be generated.\n"); "Calling _exit(EXIT_FAILURE). Core file will not be generated.\n");
_exit(EXIT_FAILURE); _exit(EXIT_FAILURE);
#endif // !BUILDFLAG(IS_LINUX) #else // BUILDFLAG(IS_LINUX)
// After leaving this handler control flow returns to the point where the // After leaving this handler control flow returns to the point where the
// signal was raised, raising the current signal once again but executing the // signal was raised, raising the current signal once again but executing the
// default handler instead of this one. // default handler instead of this one.
// Set an alarm to trigger in case the default handler does not terminate
// the process. See 'AlarmSignalHandler' for more details.
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_flags = static_cast<int>(SA_RESETHAND);
action.sa_sigaction = &AlarmSignalHandler;
sigemptyset(&action.sa_mask);
sigaction(SIGALRM, &action, nullptr);
// 'alarm' function is signal handler safe.
// https://man7.org/linux/man-pages/man7/signal-safety.7.html
// This delay is set to be long enough for the real signal handler to fire but
// shorter than chrome's process watchdog timer.
constexpr unsigned int kAlarmSignalDelaySeconds = 5;
alarm(kAlarmSignalDelaySeconds);
#endif // !BUILDFLAG(IS_LINUX)
} }
class PrintBacktraceOutputHandler : public BacktraceOutputHandler { class PrintBacktraceOutputHandler : public BacktraceOutputHandler {

View File

@ -668,7 +668,12 @@ FeatureList::OverrideState FeatureList::GetOverrideState(
const Feature& feature) const { const Feature& feature) const {
DCHECK(initialized_); DCHECK(initialized_);
DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name; DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
DCHECK(CheckFeatureIdentity(feature)) << feature.name; DCHECK(CheckFeatureIdentity(feature))
<< feature.name
<< " has multiple definitions. Either it is defined more than once in "
"code or (for component builds) the code is built into multiple "
"components (shared libraries) without a corresponding export "
"statement";
// If caching is disabled, always perform the full lookup. // If caching is disabled, always perform the full lookup.
if (!g_cache_override_state) if (!g_cache_override_state)

View File

@ -13,7 +13,7 @@ namespace base::features {
// backed by executable files. // backed by executable files.
BASE_FEATURE(kEnforceNoExecutableFileHandles, BASE_FEATURE(kEnforceNoExecutableFileHandles,
"EnforceNoExecutableFileHandles", "EnforceNoExecutableFileHandles",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_ENABLED_BY_DEFAULT);
// Optimizes parsing and loading of data: URLs. // Optimizes parsing and loading of data: URLs.
BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT); BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT);

View File

@ -75,9 +75,7 @@ class BASE_EXPORT FileDescriptorWatcher {
// Controller is deleted, ownership of |watcher_| is transfered to a delete // Controller is deleted, ownership of |watcher_| is transfered to a delete
// task posted to the MessageLoopForIO. This ensures that |watcher_| isn't // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
// deleted while it is being used by the MessageLoopForIO. // deleted while it is being used by the MessageLoopForIO.
// raw_ptr<Watcher, DanglingUntriaged> watcher_;
// TODO(crbug.com/1298696): Breaks base_unittests.
raw_ptr<Watcher, DanglingUntriagedDegradeToNoOpWhenMTE> watcher_;
// An event for the watcher to notify controller that it's destroyed. // An event for the watcher to notify controller that it's destroyed.
// As the |watcher_| is owned by Controller, always outlives the Watcher. // As the |watcher_| is owned by Controller, always outlives the Watcher.

View File

@ -350,9 +350,9 @@ class BASE_EXPORT FilePath {
// Returns a FilePath by appending a separator and the supplied path // Returns a FilePath by appending a separator and the supplied path
// component to this object's path. Append takes care to avoid adding // component to this object's path. Append takes care to avoid adding
// excessive separators if this object's path already ends with a separator. // excessive separators if this object's path already ends with a separator.
// If this object's path is kCurrentDirectory, a new FilePath corresponding // If this object's path is kCurrentDirectory ('.'), a new FilePath
// only to |component| is returned. |component| must be a relative path; // corresponding only to |component| is returned. |component| must be a
// it is an error to pass an absolute path. // relative path; it is an error to pass an absolute path.
[[nodiscard]] FilePath Append(StringPieceType component) const; [[nodiscard]] FilePath Append(StringPieceType component) const;
[[nodiscard]] FilePath Append(const FilePath& component) const; [[nodiscard]] FilePath Append(const FilePath& component) const;
[[nodiscard]] FilePath Append(const SafeBaseName& component) const; [[nodiscard]] FilePath Append(const SafeBaseName& component) const;

View File

@ -46,11 +46,11 @@ class BASE_EXPORT FilePathWatcher {
// within the directory are watched. // within the directory are watched.
kRecursive, kRecursive,
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_APPLE)
// Indicates that the watcher should watch the given path only (neither // Indicates that the watcher should watch the given path only (neither
// ancestors nor descendants). The watch fails if the path does not exist. // ancestors nor descendants). The watch fails if the path does not exist.
kTrivial, kTrivial,
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(IS_APPLE)
}; };
// Flags are a generalization of |Type|. They are used in the new // Flags are a generalization of |Type|. They are used in the new

View File

@ -390,14 +390,24 @@ bool IsPathSafeToSetAclOn(const FilePath& path) {
if (g_extra_allowed_path_for_no_execute) { if (g_extra_allowed_path_for_no_execute) {
valid_paths.push_back(g_extra_allowed_path_for_no_execute); valid_paths.push_back(g_extra_allowed_path_for_no_execute);
} }
// MakeLongFilePath is needed here because temp files can have an 8.3 path
// under certain conditions. See comments in base::MakeLongFilePath.
base::FilePath long_path = base::MakeLongFilePath(path);
DCHECK(!long_path.empty()) << "Cannot get long path for " << path;
for (const auto path_type : valid_paths) { for (const auto path_type : valid_paths) {
base::FilePath valid_path; base::FilePath valid_path;
if (base::PathService::Get(path_type, &valid_path)) { if (!base::PathService::Get(path_type, &valid_path)) {
// Temp files can sometimes have an 8.3 path. See comments in DLOG(FATAL) << "Cannot get path for pathservice key " << path_type;
// base::MakeLongFilePath. continue;
if (base::MakeLongFilePath(valid_path).IsParent(path)) { }
return true; // Temp files can sometimes have an 8.3 path. See comments in
} // base::MakeLongFilePath.
base::FilePath full_path = base::MakeLongFilePath(valid_path);
DCHECK(!full_path.empty()) << "Cannot get long path for " << valid_path;
if (full_path.IsParent(long_path)) {
return true;
} }
} }
return false; return false;
@ -1100,9 +1110,7 @@ bool PreventExecuteMapping(const FilePath& path) {
return true; return true;
} }
// MakeLongFilePath is needed here because temp files can have an 8.3 path bool is_path_safe = IsPathSafeToSetAclOn(path);
// under certain conditions. See comments in base::MakeLongFilePath.
bool is_path_safe = IsPathSafeToSetAclOn(base::MakeLongFilePath(path));
if (!is_path_safe) { if (!is_path_safe) {
// To mitigate the effect of past OS bugs where attackers are able to use // To mitigate the effect of past OS bugs where attackers are able to use
@ -1137,7 +1145,7 @@ bool PreventExecuteMapping(const FilePath& path) {
// dangerous path is being passed to a renderer, which is inherently unsafe. // dangerous path is being passed to a renderer, which is inherently unsafe.
// //
// If this check hits, please do not ignore it but consult security team. // If this check hits, please do not ignore it but consult security team.
NOTREACHED() << "Unsafe to deny execute access to path : " << path; DLOG(FATAL) << "Unsafe to deny execute access to path : " << path;
return false; return false;
} }

View File

@ -457,6 +457,16 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE)) else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
created_ = true; created_ = true;
if (flags & FLAG_WIN_NO_EXECUTE) { if (flags & FLAG_WIN_NO_EXECUTE) {
// These two DCHECKs make sure that no callers are trying to remove
// execute permission from a file that might need to be mapped executable
// later. If they hit in code then the file should not have
// FLAG_WIN_NO_EXECUTE flag, but this will mean that the file cannot be
// passed to renderers.
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".exe"),
path.Extension()));
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".dll"),
path.Extension()));
// It is possible that the ACE could not be added if the file was created // It is possible that the ACE could not be added if the file was created
// in a path for which the caller does not have WRITE_DAC access. In this // in a path for which the caller does not have WRITE_DAC access. In this
// case, ignore the error since if this is occurring then it's likely the // case, ignore the error since if this is occurring then it's likely the

Some files were not shown because too many files have changed in this diff Show More