Compare commits


No commits in common. "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" and "12a6ba324f8bc147f3f7ba299e9264e68d2d1a25" have entirely different histories.

1793 changed files with 43311 additions and 28694 deletions


@@ -1 +1 @@
-112.0.5615.49
+111.0.5563.64


@@ -126,7 +126,6 @@ Anton Obzhirov <a.obzhirov@samsung.com>
 Antonin Hildebrand <antonin.hildebrand@gmail.com>
 Antonio Gomes <a1.gomes@sisa.samsung.com>
 Anuj Kumar Sharma <anujk.sharma@samsung.com>
-Ao Hui <aohui.wan@gmail.com>
 Ao Sun <ntusunao@gmail.com>
 Ao Wang <wangao.james@bytedance.com>
 Aquibuzzaman Md. Sayem <md.sayem@samsung.com>
@@ -278,7 +277,6 @@ Daniel Waxweiler <daniel.waxweiler@gmail.com>
 Dániel Bátyai <dbatyai@inf.u-szeged.hu>
 Dániel Vince <vinced@inf.u-szeged.hu>
 Daniil Suvorov <severecloud@gmail.com>
-Danny Weiss <danny.weiss.fr@gmail.com>
 Daoming Qiu <daoming.qiu@intel.com>
 Darik Harter <darik.harter@gmail.com>
 Darshan Sen <raisinten@gmail.com>
@@ -378,7 +376,6 @@ Feifei Wang <alexswang@tencent.com>
 Felipe Erias Morandeira <felipeerias@gmail.com>
 Felix H. Dahlke <fhd@ubercode.de>
 Felix Weilbach <feweilbach@gmail.com>
-Feng Shengyuan <fengshengyuan@agora.io>
 Feng Yu <f3n67u@gmail.com>
 Fengrong Fang <fr.fang@samsung.com>
 Fernando Jiménez Moreno <ferjmoreno@gmail.com>
@@ -608,7 +605,6 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
 John Yani <vanuan@gmail.com>
 John Yoo <nearbyh13@gmail.com>
 Johnson Lin <johnson.lin@intel.com>
-Jon Jensen <jonj@netflix.com>
 Jonathan Frazer <listedegarde@gmail.com>
 Jonathan Garbee <jonathan@garbee.me>
 Jonathan Hacker <jhacker@arcanefour.com>
@@ -644,7 +640,6 @@ Julien Isorce <j.isorce@samsung.com>
 Julien Racle <jracle@logitech.com>
 Jun Fang <jun_fang@foxitsoftware.com>
 Jun Jiang <jun.a.jiang@intel.com>
-Junbong Eom <jb.eom@samsung.com>
 Jungchang Park <valley84265@gmail.com>
 Junchao Han <junchao.han@intel.com>
 Junghoon Lee <sjh836@gmail.com>
@@ -745,7 +740,6 @@ Li Yanbo <liyanbo.monster@bytedance.com>
 Li Yin <li.yin@intel.com>
 Lidwine Genevet <lgenevet@cisco.com>
 Lin Sun <lin.sun@intel.com>
-Lin Peng <penglin220@gmail.com>
 Lin Peng <penglin22@huawei.com>
 Lingqi Chi <someway.bit@gmail.com>
 Lingyun Cai <lingyun.cai@intel.com>
@@ -768,7 +762,7 @@ Luke Seunghoe Gu <gulukesh@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Luoxi Pan <l.panpax@gmail.com>
 Lu Yahan <yahan@iscas.ac.cn>
-Ma Aiguo <imaiguo@gmail.com>
+Ma Aiguo <maaiguo@uniontech.com>
 Maarten Lankhorst <m.b.lankhorst@gmail.com>
 Maciej Pawlowski <m.pawlowski@eyeo.com>
 Magnus Danielsson <fuzzac@gmail.com>
@@ -921,7 +915,6 @@ Noj Vek <nojvek@gmail.com>
 Nolan Cao <nolan.robin.cao@gmail.com>
 Oleksii Kadurin <ovkadurin@gmail.com>
 Oliver Dunk <oliver@oliverdunk.com>
-Olivier Tilloy <olivier+chromium@tilloy.net>
 Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com>
 Omar Sandoval <osandov@osandov.com>
 Owen Yuwono <owenyuwono@gmail.com>
@@ -1206,7 +1199,6 @@ Suyash Nayan <suyashnyn1@gmail.com>
 Suyash Sengar <suyash.s@samsung.com>
 Swarali Raut <swarali.sr@samsung.com>
 Swati Jaiswal <swa.jaiswal@samsung.com>
-Syed Wajid <syed.wajid@samsung.com>
 Sylvain Zimmer <sylvinus@gmail.com>
 Sylvestre Ledru <sylvestre.ledru@gmail.com>
 Synthia Islam <synthia.is@samsung.com>

src/DEPS

@ -229,7 +229,7 @@ vars = {
# #
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '15357.0.0', 'lacros_sdk_version': '15326.0.0',
# Generate location tag metadata to include in tests result data uploaded # Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates # to ResultDB. This isn't needed on some configs and the tool that generates
@ -241,7 +241,7 @@ vars = {
# luci-go CIPD package version. # luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder. # Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:8a8b4f2ea65c7ff5fde8a0c522008aed78d42d9d', 'luci_go': 'git_revision:221383f749a2c5b8587449d3d2e4982857daa9e7',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD # This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision. # instead of downloading the prebuilt pinned revision.
@ -286,6 +286,11 @@ vars = {
# Rust toolchain. # Rust toolchain.
'checkout_rust_toolchain_deps': False, 'checkout_rust_toolchain_deps': False,
# The Rust toolchain sources. It is a version tag from an instance of the
# CIPD `chromium/third_party/rust_src` package. This field is written by
# //tools/clang/scripts/upload_revision.py and shouldn't be updated by hand.
'rust_toolchain_version': 'version:2@2022-12-09',
'android_git': 'https://android.googlesource.com', 'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com', 'aomedia_git': 'https://aomedia.googlesource.com',
'boringssl_git': 'https://boringssl.googlesource.com', 'boringssl_git': 'https://boringssl.googlesource.com',
@ -299,34 +304,34 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia # the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other. # and whatever else without interference from each other.
'skia_revision': 'f5fefe5245098be43cb608eace5e14d67cdc09e6', 'skia_revision': '59932b057f281ddaeb0926ecfac55486270f8c51',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8 # the commit queue can handle CLs rolling V8
# and whatever else without interference from each other. # and whatever else without interference from each other.
'v8_revision': '96fed67922e5f54a027aed80259e5083769e33e2', 'v8_revision': '1cee747760b14aa78503a22ba1a3ab97b968fa28',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE # the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other. # and whatever else without interference from each other.
'angle_revision': '293db5ce4d0766cb3ba7711057a00f0a5bddb00d', 'angle_revision': 'cd45d155bf4cf7404061f37e974a048914ca4610',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader # the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other. # and whatever else without interference from each other.
'swiftshader_revision': '3575b5479af54e471ea6750a8585e2c9bc87801c', 'swiftshader_revision': 'aae98adc2222dcada4aa952cccad48ab08e34004',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium # the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other. # and whatever else without interference from each other.
'pdfium_revision': '4090d4c0f9873f5f50b630c26c2439b5297a6e49', 'pdfium_revision': 'd087df316170b2d8757487b1015244384624478e',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL # the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other. # and whatever else without interference from each other.
# #
# Note this revision should be updated with # Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep. # third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': 'ca1690e221677cea3fb946f324eb89d846ec53f2', 'boringssl_revision': '45b8d7bbd771cbf7e116db2ba1f1cc7af959497e',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk # the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other. # and whatever else without interference from each other.
'fuchsia_version': 'version:11.20230223.1.1', 'fuchsia_version': 'version:11.20230126.1.1',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac # the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -346,11 +351,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl # the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other. # and whatever else without interference from each other.
'nacl_revision': '417b413dbe94a861ee050d42daf945cca02dba11', 'nacl_revision': '5b530a9ffd34be8541037262cf47cf3924bfc973',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
'freetype_revision': '3f01161ff22c84c371b6dc3b5e0351e0d6e8e771', 'freetype_revision': '82ce172669f132839debe6e50a3a53fe88db9e31',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -370,23 +375,15 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult # the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other. # and whatever else without interference from each other.
'catapult_revision': '4274fe29dac91b7713244daebb6f1d2364d97193', 'catapult_revision': 'abaad53f0c4e104ab630b314af2902ad83b82c8c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling CrossBench
# and whatever else without interference from each other.
'crossbench_revision': '27639d495e1cec411073bc82ba1fe368ce0ca89a',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer # the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libfuzzer_revision': 'debe7d2d1982e540fbd6bd78604bf001753f9e74', 'libfuzzer_revision': 'debe7d2d1982e540fbd6bd78604bf001753f9e74',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling centipede
# and whatever else without interference from each other.
'centipede_revision': '63ed43d2bfa2c04e291e81d643a5581c9be90c53',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend # the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other. # and whatever else without interference from each other.
'devtools_frontend_revision': '2436ae2c5444ba8008a9f092301209a87aef0483', 'devtools_frontend_revision': 'f0bf0ece4aae3192fa2f0f2859f668cb343791be',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator # the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -414,10 +411,6 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version # the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other. # and whatever else without interference from each other.
'android_sdk_platforms_preview_version': 'YWMYkzyxGBgVsty0GhXL1oxbY0pGXQIgFc0Rh7ZMRPYC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC', 'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version # the commit queue can handle CLs rolling android_sdk_sources_version
@ -426,11 +419,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'dawn_revision': 'de24841411c4cfb13662238327f2e456c82d26f6', 'dawn_revision': 'e5193f1ffd232ebf7adfd403114edde08d162663',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'quiche_revision': '40c87d454d762f3daaeca334cd2dc962f0476b13', 'quiche_revision': '566b33c9fa5b1723db04be3d40dcaf102344c323',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit # the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -450,7 +443,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby # the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other. # and whatever else without interference from each other.
'nearby_revision': 'd260feced56cfdea53f941883c250d635ed6064d', 'nearby_revision': '26973fada5175060db140d7e1157cce6b604dc6a',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage # the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -462,19 +455,19 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'cros_components_revision': 'fb2448fc618b4e3634c8c4097b4a84fcfa34c540', 'cros_components_revision': '5e449ecf7311cba83cdcfc1b2ae449f40d22f29e',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'resultdb_version': 'git_revision:ebc74d10fa0d64057daa6f128e89f3672eeeec95', 'resultdb_version': 'git_revision:ac21cf9883af7d1bf6d60e8a7448918eb3b6aa18',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libcxxabi_revision': '87d8fe050bedb143f232b9ff99a0a46897640e5d', 'libcxxabi_revision': 'd520d582aa710cc0a4635620c02c5dbc187deb4f',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libunwind_revision': 'c5e861c7b48ee121d3719b7b5c14175c47ec5cc9', 'libunwind_revision': 'e95b94b74d26f8387d4fb03a687a2fab0ed8e91c',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -482,18 +475,18 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'highway_revision': '58746ca5b9f9444a2a3549704602ecc6239f8f41', 'highway_revision': '8ae5b88670fb918f815b717c7c13d38a9b0eb4bb',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg # the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other. # and whatever else without interference from each other.
'ffmpeg_revision': 'ee0c52d52036ecadfd38affec86c04937480bedb', 'ffmpeg_revision': 'a249b21db6516234e5456716ae074fbb00176b3f',
# If you change this, also update the libc++ revision in # If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni. # //buildtools/deps_revisions.gni.
'libcxx_revision': 'e136ec5032a5e5d97e988ce66e8c269a80ff54c4', 'libcxx_revision': '1127c78cf90cf253be614a1e1d3645da57edbeb4',
# GN CIPD package version. # GN CIPD package version.
'gn_version': 'git_revision:b25a2f8c2d33f02082f0f258350f5e22c0973108', 'gn_version': 'git_revision:5e19d2fb166fbd4f6f32147fbb2f497091a54ad8',
# ninja CIPD package version. # ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@ -586,6 +579,19 @@ deps = {
'condition': 'host_os == "linux"', 'condition': 'host_os == "linux"',
}, },
# Rust sources are used to build the Rust standard library, and on toolchain
# build bots, to build the Rust toolchain.
'src/third_party/rust_src/src': {
'packages': [
{
'package': 'chromium/third_party/rust_src',
'version': Var('rust_toolchain_version'),
},
],
'dep_type': 'cipd',
'condition': 'checkout_rust_toolchain_deps or use_rust',
},
# We don't know target_cpu at deps time. At least until there's a universal # We don't know target_cpu at deps time. At least until there's a universal
# binary of httpd-php, pull both intel and arm versions in DEPS and then pick # binary of httpd-php, pull both intel and arm versions in DEPS and then pick
# the right one at runtime. # the right one at runtime.
@ -625,7 +631,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/android_webview/tools/cts_archive', 'package': 'chromium/android_webview/tools/cts_archive',
'version': 'APYMYksv9eNAkU6osFvNXN38pMO1Q1kScjeecePr7NgC', 'version': 'C4m9-gZKvvtS0-KQ9cMRi6wEfZH_TeWSiXEgsgtGtsMC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -648,7 +654,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_mac_universal', 'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': 'u1XGTm7703jO-Ax33P8j-x_KijOeHd36aBA6SO8V3a8C', 'version': '1c74wqdugvZKRLfu6oY7wkYQ_VpIAObl_7TDwLQw8w4C',
}, },
], ],
} }
@ -659,7 +665,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_mac_universal_prod', 'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
'version': 'qDhUmRj82uhWqE2eVqt12e1eJKWKgRpRjgQrBSrdyP4C', 'version': 'aFiR8jLJBXsy6aYQhQp8cd9yBEmqa_cJZwx0ltJbKT4C',
}, },
], ],
}, },
@ -670,7 +676,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_win_x86', 'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'RmZn_R0BOPSbruD15DEq9pfu5IhhtjoJX6z-ufrWnD4C', 'version': 'G5yIDMjjCL2TyjU-EmLubZkkb1sLM0XdZ5zB1XmviQkC',
}, },
], ],
}, },
@ -681,7 +687,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chrome_win_x86_64', 'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'AAes70A2b8-CLhU1h9Sh1x2K-N3WjG7j2Tlp6VOgmnQC', 'version': 'LHw1kjfI3H94qB22t7YsgnMQyXBBgxCgtub5F-GxSCEC',
}, },
], ],
}, },
@ -693,7 +699,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_mac_amd64', 'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': '5ui7_fqpvI7a8omWqM8iyD0PZFPJpYKoMHkAb6xA_TkC', 'version': 'Rd4fkayO0xqiXgM-WjFwSTh1YECDXF5ZfcSLW_odlz0C',
}, },
], ],
}, },
@ -705,7 +711,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_mac_arm64', 'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': '0KnizXQ2_n_V3aEHR240LqMKw7b0-Pm9VBUmVuQh0cAC', 'version': 'UoyUoEkFS37BkNrD1mNVfqdDlPujDLGwaGBdWz7bPakC',
}, },
], ],
}, },
@ -716,7 +722,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_win_x86', 'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'g_24x4tVzQIoveectPGIXIGc2ptYDTnOodXieF_OG_4C', 'version': 'Sl2g34_swdY9lIDQA6pTzPSTM5tec284DtwYekj9BIwC',
}, },
], ],
}, },
@ -727,18 +733,18 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/updater/chromium_win_x86_64', 'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 's6U9lpJZsILIKkP5bCc_QqvJjPW2MwMWg5IoCBt_YEYC', 'version': 'vnZtwC2H42TkFgDMIYizwAUGFXTMOm00-yjnB6gnXigC',
}, },
], ],
}, },
'src/chrome/test/data/autofill/captured_sites/artifacts': { 'src/chrome/test/data/autofill/captured_sites': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + 'a38d810c87a18582e986b94650d9cfa4b67be12c', 'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + '58a7920c173397b57d8d7be95cb93c2b43d02e26',
'condition': 'checkout_chromium_autofill_test_dependencies', 'condition': 'checkout_chromium_autofill_test_dependencies',
}, },
'src/chrome/test/data/password/captured_sites/artifacts': { 'src/chrome/test/data/password/captured_sites': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '04b3ea663adf745c52831650e2426b54bc94e65d', 'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '93dfc8b7199a285a2ed832e607b0e68c5544273a',
'condition': 'checkout_chromium_password_manager_test_dependencies', 'condition': 'checkout_chromium_password_manager_test_dependencies',
}, },
@ -764,21 +770,21 @@ deps = {
'src/clank': { 'src/clank': {
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' + 'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'd83811f32343245218e742319cec89aaefb94657', '7d4e93f3d1693f1dfe471527e93a8e729ce149a3',
'condition': 'checkout_android and checkout_src_internal', 'condition': 'checkout_android and checkout_src_internal',
}, },
'src/docs/website': { 'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '182a630499559e1403237d14e2bc6302d384ed39', 'url': Var('chromium_git') + '/website.git' + '@' + '7e351332addd1fca691bb524c976a56f17e3eb95',
}, },
'src/ios/third_party/earl_grey2/src': { 'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '8ac47627cb9ba09bf4bc3489b7aff5d77cd6eb88', 'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '99ba3b6ed7b8489899f06a0d602e84fc657e8338',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
'src/ios/third_party/edo/src': { 'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '51058a369f943064dc6db4f38dca32263d584ea5', 'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'd4d6f7da76f34b87b7b953176ef9e08eda83afb1',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -793,7 +799,7 @@ deps = {
}, },
'src/ios/third_party/material_components_ios/src': { 'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'a407ef3ac3220882732e701804613c44704ebf78', 'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '2cdac2db582f6067b014aa66a3846588352361a1',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -863,7 +869,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/linux-amd64', 'package': 'chromium/rts/model/linux-amd64',
'version': 'E7vzLhZk6xAJnnioidm0-d5a4cz1OxOr0LJUsCkKKJ0C', 'version': 'bCwganuATTWjTe2ahjxynn8rnTuSw900jtLOYmi3kV4C',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -874,7 +880,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/mac-amd64', 'package': 'chromium/rts/model/mac-amd64',
'version': '4wYh3p2y6ATe5OeiGmtl-G9thdrKGoX5DHzaP8V_tecC', 'version': 'ms15aJhiLzrF61zOZxq2jcESbsF3FFYtCS8R290t8JsC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -885,7 +891,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/windows-amd64', 'package': 'chromium/rts/model/windows-amd64',
'version': 'E5Y3kcrVZt1PybXoGxTDRb_KmswZX_5jBphOaHwm2fQC', 'version': 'xILgcx3FOZDE8fCy3EXmw76GOIrdmlVb3aQ5dUSq8x8C',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -953,7 +959,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/androidx', 'package': 'chromium/third_party/androidx',
'version': 'zEg6hTXAR6Mcqyfh8gHDzD9fmyy1xwz4swj6pkENIYsC', 'version': '8KUWsjmvRQJlzdaro14SgP8nok3-kHY2h00BEjXLJqQC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -991,7 +997,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/aapt2', 'package': 'chromium/third_party/android_build_tools/aapt2',
'version': '36NqCian2RIwuM6SFfizdUgKoXyZhy3q6pFfsws0szYC', 'version': 'cbNG7g8Sinh-lsT8hWsU-RyXqLT_uh4jIb1fjCdhrzIC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1013,7 +1019,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/bundletool', 'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'TpDdbF-PPgwL0iOVsdLM07L-DUp2DV3hgzCMmPd2_GUC', 'version': 'XIPSJgFHEHN1ogOJqWVktlbl8PTfLZdNf_G2h4GcnrYC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1024,7 +1030,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/lint', 'package': 'chromium/third_party/android_build_tools/lint',
'version': 'MSpv-kFDDSPO0SY0dLdHegUJcJT1Yy8cL9r3vlAZ9vkC', 'version': 'EPj7vnLteKz9kMQ6x4ZPaM5E20Bt4I0wTdrxdBpruZMC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1035,7 +1041,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/manifest_merger', 'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': 'EbRaK62t9grqlZqL-JTd_zwM4t1u9fm1x4c2rLE0cqQC', 'version': '5Zw4RYBL86koJro2O-jjcZYxOOdEW-hJDYykae8efQAC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1069,10 +1075,6 @@ deps = {
'package': 'chromium/third_party/android_sdk/public/platforms/android-33', 'package': 'chromium/third_party/android_sdk/public/platforms/android-33',
'version': Var('android_sdk_platforms_version'), 'version': Var('android_sdk_platforms_version'),
}, },
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-tiramisuprivacysandbox',
'version': Var('android_sdk_platforms_preview_version'),
},
{ {
'package': 'chromium/third_party/android_sdk/public/sources/android-31', 'package': 'chromium/third_party/android_sdk/public/sources/android-31',
'version': Var('android_sdk_sources_version'), 'version': Var('android_sdk_sources_version'),
@ -1120,7 +1122,7 @@ deps = {
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'), Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/breakpad/breakpad': 'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'abb105db21e962eda5b7d9b7a0ac8dd701e0b987', Var('chromium_git') + '/breakpad/breakpad.git' + '@' + '79326ebe9446add03e76b4422ff8036e812224d2',
'src/third_party/byte_buddy': { 'src/third_party/byte_buddy': {
'packages': [ 'packages': [
@ -1145,7 +1147,7 @@ deps = {
}, },
'src/third_party/cast_core/public/src': 'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + 'e42ef68aa05ac0c163805f60b9b19284f3c2dee3', Var('chromium_git') + '/cast_core/public' + '@' + 'f4628fda1b370eb238ae69545024d256ca62d719',
'src/third_party/catapult': 'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'), Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@ -1174,7 +1176,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple # Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild. # Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': { 'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '52efbfdc210624f1895d5994149f53c3a4457f29', 'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'd60807b98527df86e516532b8e2a62a1cb44c128',
'condition': 'checkout_chromeos', 'condition': 'checkout_chromeos',
}, },
@ -1192,27 +1194,23 @@ deps = {
# For Linux and Chromium OS. # For Linux and Chromium OS.
'src/third_party/cros_system_api': { 'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'ffb88930c81ef7f7026a2433c424d8b3658580d4', 'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'e0bfd3d75917adfa22e401805f9f9793cec82559',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
'src/third_party/crossbench':
Var('chromium_git') + '/crossbench.git' + '@' + Var('crossbench_revision'),
'src/third_party/crubit/src': { 'src/third_party/crubit/src': {
'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'), 'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'),
'condition': 'checkout_crubit', 'condition': 'checkout_crubit',
}, },
'src/third_party/depot_tools': 'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a6898e71abf374d699ebaa121312e89bad8f100a', Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'c023a6302fc665bae743a5833dea350fd3d0749f',
'src/third_party/devtools-frontend/src': 'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'), Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': { 'src/third_party/devtools-frontend-internal': {
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '2ac32de43d557d678de46fb7cfc850187f3379fd', 'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '4c3517346586ea020e5859cf51488e534a90d15e',
'condition': 'checkout_src_internal', 'condition': 'checkout_src_internal',
}, },
@ -1220,7 +1218,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d', Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src': 'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '3460f3558e7b469efb8a225894e21929c8c77629', Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '6156797016164b87b3e360e02d0e4107f7f66fbc',
'src/third_party/emoji-metadata/src': { 'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06', 'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
@ -1248,11 +1246,11 @@ deps = {
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '689da3a7ed50af7448c3f1961d1791c7c1d9c85c', Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '689da3a7ed50af7448c3f1961d1791c7c1d9c85c',
'src/third_party/flatbuffers/src': 'src/third_party/flatbuffers/src':
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'a56f9ec50e908362e20254fcef28e62a2f148d91', Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'b47ba1d5ffae3bd4d5eaad615e33d7cc5c1e3d4a',
# Used for embedded builds. CrOS & Linux use the system version. # Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': { 'src/third_party/fontconfig/src': {
'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '06929a556fdc39c8fe12965b69070c8df520a33e', 'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '452be8125f0e2a18a7dfef469e05d19374d36307',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
@ -1372,7 +1370,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e', Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu': 'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'faf22e66ceafad90f5cafbd6707055c24646adcc', Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '4e100720a20a471ca5ceff3b15a87596b694ada4',
'src/third_party/icu4j': { 'src/third_party/icu4j': {
'packages': [ 'packages': [
@ -1435,22 +1433,11 @@ deps = {
'condition': 'checkout_android', 'condition': 'checkout_android',
}, },
'src/third_party/kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/kotlin_stdlib',
'version': 'Mg7371mEUwDQH4_z29HdWqYWVlXN6t2dXX0kIutg_SwC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/kotlinc/current': { 'src/third_party/kotlinc/current': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/kotlinc', 'package': 'chromium/third_party/kotlinc',
'version': 'bCZedwoM-hb1pP1QKzA3P5aR4zjZltqLj4JQpmQsHuUC', 'version': 'F-v9Yy4tNQtjGB7TtAWc2J-3qhx9Q6ixZJyuGixVH08C',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1463,14 +1450,11 @@ deps = {
'src/third_party/libFuzzer/src': 'src/third_party/libFuzzer/src':
Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'), Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
'src/third_party/centipede/src':
Var('chromium_git') + '/external/github.com/google/centipede.git' + '@' + Var('centipede_revision'),
'src/third_party/libaddressinput/src': 'src/third_party/libaddressinput/src':
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd', Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd',
'src/third_party/libaom/source/libaom': 'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '70b12695e1967d9589dd15b345a039e575e8f429', Var('aomedia_git') + '/aom.git' + '@' + '74d61ae86f20bc9fb707347bfe618425024f3865',
'src/third_party/libavif/src': 'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'), Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@ -1503,7 +1487,7 @@ deps = {
}, },
'src/third_party/libjpeg_turbo': 'src/third_party/libjpeg_turbo':
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '0b6e6a152242c2fa30ffb29633d1b7d7b229b46b', Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'ed683925e4897a84b3bffc5c1414c85b97a129a3',
'src/third_party/liblouis/src': { 'src/third_party/liblouis/src': {
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376', 'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
@ -1531,16 +1515,16 @@ deps = {
}, },
'src/third_party/libvpx/source/libvpx': 'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'db69ce6aea278bee88668fd9cc2af2e544516fdb', Var('chromium_git') + '/webm/libvpx.git' + '@' + 'b7c22b3a9584d7d9c0a7b9b37a52bc595113b398',
'src/third_party/libwebm/source': 'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da', Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
'src/third_party/libwebp/src': 'src/third_party/libwebp/src':
Var('chromium_git') + '/webm/libwebp.git' + '@' + 'fd7b5d48464475408d32d2611bdb6947d4246b97', Var('chromium_git') + '/webm/libwebp.git' + '@' + '603e8d7adb0ccc35237419c2938194623b60e9be',
'src/third_party/libyuv': 'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '2bdc210be9eb11ded16bf3ef1f6cadb0d4dcb0c2', Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '6e4b0acb4b3d5858c77a044aad46132998ac4a76',
'src/third_party/lighttpd': { 'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'), 'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@ -1667,7 +1651,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9', Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9',
'src/third_party/openscreen/src': 'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '5d694418bc76f66463f06ce141c375062b0ba3b0', Var('chromium_git') + '/openscreen' + '@' + 'eca304d29cee3f9d045fd0dd36f147a91a367c75',
'src/third_party/openxr/src': { 'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245', 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@ -1684,7 +1668,7 @@ deps = {
}, },
'src/third_party/perfetto': 'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + '4bda78645d1d23a98473b793bc532a3ebff6c7f9', Var('android_git') + '/platform/external/perfetto.git' + '@' + '3aa2acd9af48d097ad94cf778c2228031e6c4dfa',
'src/third_party/perl': { 'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3', 'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@ -1718,13 +1702,13 @@ deps = {
}, },
'src/third_party/re2/src': 'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '3a8436ac436124a57a4e22d5c8713a2d42b381d7', Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '8afcf7fcc481692197e33612446d69e8f5777c54',
'src/third_party/r8': { 'src/third_party/r8': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/r8', 'package': 'chromium/third_party/r8',
'version': 'HGbnG0_a1HHQtwgKBlFRLuC0-AVyYhHpcTol560MvlUC', 'version': 'lhnuNLpWpWBVM6efX0iIg5i9ztfW8VKpMvkyfWCxfr0C',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1738,7 +1722,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/r8', 'package': 'chromium/third_party/r8',
'version': 'PwglNZFRNPkBBXdnY9NfrZFk2ULWDTRxhV9rl2kvkpUC', 'version': 'qGtBu6TtxyR5XNy4cmsslb7c946YtkZF5_QCjVP-wc8C',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1762,7 +1746,7 @@ deps = {
}, },
'src/third_party/ruy/src': 'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '21a85fef159f9942f636a43b14c64b481c2a05b2', Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '3168a5c8f4c447fd8cea94078121ee2e2cd87df0',
'src/third_party/skia': 'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'), Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@ -1774,7 +1758,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0', Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src': 'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '88742a54683bcdec9a0d0c14462621da8b6f841e', Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '469aae8118e18b7354607f8ef09780cf8f3e54aa',
'src/third_party/sqlite4java': { 'src/third_party/sqlite4java': {
'packages': [ 'packages': [
@ -1816,20 +1800,20 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f', Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src': 'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '60ec371de65a63d588bcfce7a99482847ad1312e', Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ef70dc999eee784e3f505e89c798f8b9cc894e52',
'src/third_party/turbine': { 'src/third_party/turbine': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/turbine', 'package': 'chromium/third_party/turbine',
'version': 'YQC-btuan_DTe9V9dv_e1LxgYSWeOoDfrd-VSqzIvHkC', 'version': 't0TeGgk2CZr3B0HtEqBb60BSRwVPRJm9066izhJwzz0C',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3c1556cc73226c2895c1de9a925dc5fe623c8752', 'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@243deb3abd84f442957dc5394745d25482ff791b',
'src/third_party/vulkan_memory_allocator': 'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907', Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@ -1866,10 +1850,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2', Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src': 'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '6c8361e98f1daba65902f5e2fc1297893ac14b67', Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'a7e54e7b964d08901cba6418ca00ffec501bc867',
'src/third_party/webrtc': 'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'd75b9e9ff07ee42841b4e416629c9fbd4b058905', Var('webrtc_git') + '/src.git' + '@' + 'b6b9b1fc761c039195faa033cb8fdde4ed8ba0a9',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use # Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file. # Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@ -1887,7 +1871,7 @@ deps = {
}, },
'src/third_party/xnnpack/src': 'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '659147817805d17c7be2d60bd7bbca7e780f9c82', Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a50369c0fdd15f0f35b1a91c964644327a88d480',
'src/tools/page_cycler/acid3': 'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521', Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@ -1896,7 +1880,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/linux-amd64', 'package': 'skia/tools/goldctl/linux-amd64',
'version': '-G9gUusEGDPsbf_GULdyJo9DYyeNBuqD8gHfdxCvIbYC', 'version': 'PZOpm-VdLUuaVE8seo910YRCnqv7Y2BkPcrmUs13RMAC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1906,7 +1890,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/windows-amd64', 'package': 'skia/tools/goldctl/windows-amd64',
'version': 'BZ0EL-KSkwCzJciJf9MbwmZAJPRhlKOp0LEYiTV6lWIC', 'version': 'qp3u_bn43vFlG3HHG61Co9eOeo52m6SWIw099mHqt9EC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1917,7 +1901,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/mac-amd64', 'package': 'skia/tools/goldctl/mac-amd64',
'version': '0GVvuvDBNt6KJ7UzxBRUW5ShTWCliifyzaCkudNzmrkC', 'version': 'aZ8KYJUPYrRq4f7-Td3nt0et_16S06A0vovOn2c85tIC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1928,7 +1912,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'skia/tools/goldctl/mac-arm64', 'package': 'skia/tools/goldctl/mac-arm64',
'version': '8vKG1ZGA0f7asv5AHh_7yBxVD2h-I-yR2oY4TOjwo6kC', 'version': 'JtcfJFsvsUuaaIajqvwETn8j5hxOSC_YLDszV96Ukn8C',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -1939,7 +1923,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'), Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': { 'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@67c8cac0a84ad86b64ecf3f4af23a928fb605313', 'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@f1d52b8c1ec0769ac006917d1fe42e99a4dba6c3',
'condition': 'checkout_src_internal', 'condition': 'checkout_src_internal',
}, },
@ -1958,7 +1942,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/eche_app/app', 'package': 'chromeos_internal/apps/eche_app/app',
'version': 'WyNqAPOj-HR5fZBbkHIXf7YeyCvf0GpXuhdv6EqzNJsC', 'version': 'Y9Vb3-WAI0IRjTRTVDtPP86MNNpZItvfey3JuYZXXeYC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1969,7 +1953,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/help_app/app', 'package': 'chromeos_internal/apps/help_app/app',
'version': 'hF_ZkOgJWb6Tl-9h6WAmpF4VcZggBH4rjoP_hBr2ddUC', 'version': 'J19Uq07iO__IsduQFotOfHNdiRWoyIQc4UgK1HpMFU8C',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1980,7 +1964,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/media_app/app', 'package': 'chromeos_internal/apps/media_app/app',
'version': 'EXosTZG9iiyjnqmWKjS04Tf9dvSUjbHqqhGv1SQW0ycC', 'version': 'CHpgn1-7IChFiK96I1-giMbXe-Cl9XQiwH3aHwCGzYwC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1991,7 +1975,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/projector_app/app', 'package': 'chromeos_internal/apps/projector_app/app',
'version': 'zmInwk2DIsJlzZbF9Fw29hmN6rQTpzqIgzzMAgwl2PkC', 'version': 'ufJ9DwqTBE76l81FUQQ2JOIG1ely5QRDFuwz3ccJIRIC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -2010,7 +1994,7 @@ deps = {
}, },
'src/third_party/android_prebuilts/build_tools': { 'src/third_party/android_prebuilts/build_tools': {
'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '673c20b524a83b662d8c1057fd3eec8fd0f93f9d', 'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '5794e96eb8bae47bb48feee915d99583573b3887',
'condition': 'checkout_android_prebuilts_build_tools', 'condition': 'checkout_android_prebuilts_build_tools',
}, },
@ -2585,7 +2569,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement',
'version': 'version:2@18.1.0.cr1', 'version': 'version:2@18.0.1.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2739,7 +2723,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks',
'version': 'version:2@18.0.2.cr1', 'version': 'version:2@18.0.1.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2779,22 +2763,11 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/com_google_android_play_core_common': { 'src/third_party/android_deps/libs/com_google_android_play_core': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core_common', 'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core',
'version': 'version:2@2.0.2.cr1', 'version': 'version:2@1.10.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_play_feature_delivery': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_feature_delivery',
'version': 'version:2@2.0.1.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3355,7 +3328,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy', 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy',
'version': 'version:2@1.12.22.cr1', 'version': 'version:2@1.12.13.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3366,7 +3339,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent', 'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent',
'version': 'version:2@1.12.22.cr1', 'version': 'version:2@1.12.13.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3505,6 +3478,39 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/org_jetbrains_annotations': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_annotations',
'version': 'version:2@13.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': { 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': {
'packages': [ 'packages': [
{ {
@ -3575,7 +3581,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core', 'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core',
'version': 'version:2@5.1.1.cr1', 'version': 'version:2@4.7.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3586,7 +3592,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_objenesis_objenesis', 'package': 'chromium/third_party/android_deps/libs/org_objenesis_objenesis',
'version': 'version:2@3.3.cr1', 'version': 'version:2@3.2.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3870,26 +3876,16 @@ include_rules = [
'+third_party/icu/source/i18n/unicode', '+third_party/icu/source/i18n/unicode',
'+url', '+url',
# Abseil is allowed by default, but some features are banned. See # Abseil features are allowlisted explicitly. See
# //styleguide/c++/c++-features.md. # //styleguide/c++/c++-features.md.
'+third_party/abseil-cpp', '-absl',
'-third_party/abseil-cpp/absl/algorithm/container.h', '-third_party/abseil-cpp',
'-third_party/abseil-cpp/absl/container', '+third_party/abseil-cpp/absl/base/attributes.h',
'-third_party/abseil-cpp/absl/crc', '+third_party/abseil-cpp/absl/cleanup/cleanup.h',
'-third_party/abseil-cpp/absl/flags', '+third_party/abseil-cpp/absl/numeric/int128.h',
'-third_party/abseil-cpp/absl/functional/any_invocable.h', '+third_party/abseil-cpp/absl/types/optional.h',
'-third_party/abseil-cpp/absl/functional/bind_front.h', '+third_party/abseil-cpp/absl/types/variant.h',
'-third_party/abseil-cpp/absl/functional/function_ref.h', '+third_party/abseil-cpp/absl/utility/utility.h',
'-third_party/abseil-cpp/absl/hash',
'-third_party/abseil-cpp/absl/log',
'-third_party/abseil-cpp/absl/random',
'-third_party/abseil-cpp/absl/status/statusor.h',
'-third_party/abseil-cpp/absl/strings',
'+third_party/abseil-cpp/absl/strings/cord.h',
'-third_party/abseil-cpp/absl/synchronization',
'-third_party/abseil-cpp/absl/time',
'-third_party/abseil-cpp/absl/types/any.h',
'-third_party/abseil-cpp/absl/types/span.h',
] ]
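
Both the old per-header allowlist and the new default-allow rule above permit absl/types/optional.h and absl/cleanup/cleanup.h. A minimal sketch of code that stays inside that common subset (the include paths and abseil availability are assumed, for illustration only):

#include <cstdio>
#include "third_party/abseil-cpp/absl/cleanup/cleanup.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

// Hypothetical parser used only for this example.
absl::optional<int> ParsePort(const char* text) {
  int value = 0;
  if (std::sscanf(text, "%d", &value) != 1 || value < 0 || value > 65535)
    return absl::nullopt;
  return value;
}

int main() {
  absl::Cleanup log_done = [] { std::puts("done"); };  // runs at scope exit
  if (absl::optional<int> port = ParsePort("8080"))
    std::printf("port: %d\n", *port);
  return 0;
}
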
@ -4357,6 +4353,30 @@ hooks = [
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1', '-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1',
], ],
}, },
{
'name': 'msan_chained_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1',
],
},
{
'name': 'msan_no_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1',
],
},
{ {
'name': 'wasm_fuzzer', 'name': 'wasm_fuzzer',
'pattern': '.', 'pattern': '.',
@ -4552,7 +4572,7 @@ hooks = [
{ {
'name': 'Fetch Android AFDO profile', 'name': 'Fetch Android AFDO profile',
'pattern': '.', 'pattern': '.',
'condition': 'checkout_android', 'condition': 'checkout_android or checkout_linux',
'action': [ 'python3', 'action': [ 'python3',
'src/tools/download_optimization_profile.py', 'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/newest.txt', '--newest_state=src/chrome/android/profiles/newest.txt',
@ -4561,18 +4581,6 @@ hooks = [
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm', '--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
], ],
}, },
{
'name': 'Fetch Android Arm AFDO profile',
'pattern': '.',
'condition': 'checkout_android',
'action': [ 'python3',
'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/arm.newest.txt',
'--local_state=src/chrome/android/profiles/arm.local.txt',
'--output_name=src/chrome/android/profiles/arm.afdo.prof',
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
],
},
{ {
'name': 'gvr_static_shim_android', 'name': 'gvr_static_shim_android',
'pattern': '\\.sha1', 'pattern': '\\.sha1',

File diff suppressed because it is too large

@ -207,7 +207,7 @@ AllocatorDispatch g_allocator_dispatch = {&AllocFn,
} // namespace base::allocator::dispatcher::allocator_shim_details } // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher::partition_allocator_details { namespace base::allocator::dispatcher::partition_allocator_details {
namespace { namespace {
@ -222,7 +222,7 @@ void PartitionFreeHook(void* address) {
} // namespace } // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details } // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
@ -236,11 +236,11 @@ void InstallStandardAllocatorHooks() {
// happen for tests. // happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) #endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks( partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook, &partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook); &partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
} }
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher
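
The hunk above keeps the PartitionAlloc observer hooks out of NaCl builds. Hooks like PartitionAllocHook and PartitionFreeHook run on every allocation and free, so they must not allocate themselves. A rough standalone sketch of such a hook pair; the signatures here are assumptions for illustration, not the PartitionAlloc API:

#include <atomic>
#include <cstddef>

namespace {
std::atomic<size_t> g_alloc_count{0};
std::atomic<size_t> g_free_count{0};

// Allocation-side observer: count only, never allocate or take locks.
void OnAllocationObserved(void* /*address*/, size_t /*size*/,
                          const char* /*type_name*/) {
  g_alloc_count.fetch_add(1, std::memory_order_relaxed);
}

// Free-side observer.
void OnFreeObserved(void* /*address*/) {
  g_free_count.fetch_add(1, std::memory_order_relaxed);
}
}  // namespace

int main() {
  OnAllocationObserved(nullptr, 16, "Example");
  OnFreeObserved(nullptr);
  return g_alloc_count.load() == g_free_count.load() ? 0 : 1;
}
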


@ -6,8 +6,6 @@
#include "base/check.h" #include "base/check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
@ -17,25 +15,15 @@
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// pthread_key_t has different signedness on Mac and Android. Store the null pthread_key_t ReentryGuard::entered_key_ = 0;
// value in a strongly-typed constant to avoid "comparison of integers of
// different signs" warnings when comparing with 0.
constexpr pthread_key_t kNullKey = 0;
pthread_key_t ReentryGuard::entered_key_ = kNullKey;
void ReentryGuard::InitTLSSlot() { void ReentryGuard::InitTLSSlot() {
if (entered_key_ == kNullKey) { if (entered_key_ == 0) {
int error = pthread_key_create(&entered_key_, nullptr); int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error); CHECK(!error);
// Touch the TLS slot immediately to force any allocations.
// TODO(https://crbug.com/1411454): Use this technique to avoid allocations
// in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
// ReentryGuard redundant.
pthread_setspecific(entered_key_, nullptr);
} }
DCHECK_NE(entered_key_, kNullKey); DCHECK(entered_key_ != 0);
} }
#else #else
@ -43,19 +31,4 @@ void ReentryGuard::InitTLSSlot() {
void ReentryGuard::InitTLSSlot() {} void ReentryGuard::InitTLSSlot() {}
#endif #endif
void ReentryGuard::RecordTLSSlotToCrashKey() {
// Record the key in crash dumps to detect when it's higher than 32
// (PTHREAD_KEY_2NDLEVEL_SIZE).
// TODO(crbug.com/1411454): Remove this after diagnosing reentry crashes.
static auto* const crash_key = base::debug::AllocateCrashKeyString(
"reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
base::debug::SetCrashKeyString(crash_key, base::NumberToString(entered_key_));
#else
base::debug::SetCrashKeyString(crash_key, "unused");
#endif
}
} // namespace base::allocator::dispatcher } // namespace base::allocator::dispatcher


@ -23,10 +23,8 @@ namespace base::allocator::dispatcher {
// twice. The scoped guard allows us to detect that. // twice. The scoped guard allows us to detect that.
// //
// Besides that the implementations of thread_local on macOS and Android // Besides that the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local variables // seem to allocate memory lazily on the first access to thread_local variables.
// (and on Android at least thread_local is implemented on top of pthread so is // Make use of pthread TLS instead of C++ thread_local there.
// strictly worse for performance). Make use of pthread TLS instead of C++
// thread_local there.
struct BASE_EXPORT ReentryGuard { struct BASE_EXPORT ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) { ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true)); pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
@ -39,19 +37,12 @@ struct BASE_EXPORT ReentryGuard {
explicit operator bool() const noexcept { return allowed_; } explicit operator bool() const noexcept { return allowed_; }
// This function must be called before installing any allocator hooks because // This function must be called in very early of the process start-up in
// some TLS implementations may allocate (eg. glibc will require a malloc call // order to acquire a low TLS slot number because glibc TLS implementation
// to allocate storage for a higher slot number (>= PTHREAD_KEY_2NDLEVEL_SIZE // will require a malloc call to allocate storage for a higher slot number
// == 32). This touches the thread-local storage so that any malloc happens // (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot.
// before installing the hooks.
static void InitTLSSlot(); static void InitTLSSlot();
// InitTLSSlot() is called before crash keys are available. At some point
// after SetCrashKeyImplementation() is called, this function should be
// called to record `entered_key_` to a crash key for debugging. This may
// allocate so it must not be called from inside an allocator hook.
static void RecordTLSSlotToCrashKey();
private: private:
static pthread_key_t entered_key_; static pthread_key_t entered_key_;
const bool allowed_; const bool allowed_;
@ -65,7 +56,6 @@ struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; } constexpr explicit operator bool() const noexcept { return true; }
static void InitTLSSlot(); static void InitTLSSlot();
static void RecordTLSSlotToCrashKey();
}; };
#endif #endif
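
For reference, the pthread-TLS pattern described in the comments above can be sketched in isolation as follows. This is an illustrative standalone version, not the Chromium class; the key must be created (and the slot touched once) before any allocator hook can run, since pthread_key_create and the first write may themselves allocate:

#include <pthread.h>
#include <cassert>

namespace {
pthread_key_t g_entered_key = 0;

void InitReentryGuardSlot() {
  if (g_entered_key == 0) {
    const int error = pthread_key_create(&g_entered_key, nullptr);
    assert(!error);
    (void)error;
    // Touch the slot once so any lazy allocation happens now, not inside a hook.
    pthread_setspecific(g_entered_key, nullptr);
  }
}

class ScopedReentryGuard {
 public:
  ScopedReentryGuard() : allowed_(!pthread_getspecific(g_entered_key)) {
    pthread_setspecific(g_entered_key, reinterpret_cast<void*>(1));
  }
  ~ScopedReentryGuard() {
    if (allowed_)
      pthread_setspecific(g_entered_key, nullptr);
  }
  explicit operator bool() const { return allowed_; }

 private:
  const bool allowed_;
};

// Hypothetical hook body showing the intended use.
void OnAllocationHook() {
  ScopedReentryGuard guard;
  if (!guard)
    return;  // Already inside the hook on this thread; skip to avoid recursion.
  // ... record the sample; anything here may allocate without re-entering ...
}
}  // namespace

int main() {
  InitReentryGuardSlot();
  OnAllocationHook();
  return 0;
}
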


@ -32,16 +32,10 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
BASE_FEATURE(kPartitionAllocDanglingPtr, BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr", "PartitionAllocDanglingPtr",
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) FEATURE_DISABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif
);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = { constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"}, {DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogOnly, "log_only"}, {DanglingPtrMode::kLogSignature, "log_signature"},
}; };
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr, &kPartitionAllocDanglingPtr,
@ -49,24 +43,14 @@ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
DanglingPtrMode::kCrash, DanglingPtrMode::kCrash,
&kDanglingPtrModeOption, &kDanglingPtrModeOption,
}; };
constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
&kDanglingPtrTypeOption,
};
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't // If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly. // disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan, BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan", "PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition. // If enabled, PCScan is turned on only for the browser's malloc partition.
@ -104,8 +88,7 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
BASE_FEATURE(kPartitionAllocBackupRefPtr, BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX)) (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
@ -200,11 +183,11 @@ BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
// In addition to heap, scan also the stack of the current mutator. // In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning, BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning", "PartitionAllocPCScanStackScanning",
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED) #if defined(PA_PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
#endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED) #endif // defined(PA_PCSCAN_STACK_SUPPORTED)
); );
BASE_FEATURE(kPartitionAllocDCScan, BASE_FEATURE(kPartitionAllocDCScan,


@ -6,6 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_ #define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/feature_list.h" #include "base/feature_list.h"
@ -24,6 +25,10 @@ extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam; kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md // See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode { enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr. // Crash immediately after detecting a dangling raw_ptr.
@ -31,33 +36,19 @@ enum class DanglingPtrMode {
// Log the signature of every occurrences without crashing. It is used by // Log the signature of every occurrences without crashing. It is used by
// bots. // bots.
// Format "[DanglingSignature]\t<1>\t<2>\t<3>\t<4>" // Format "[DanglingSignature]\t<1>\t<2>"
// 1. The function which freed the memory while it was still referenced. // 1. The function who freed the memory while it was still referenced.
// 2. The task in which the memory was freed. // 2. The function who released the raw_ptr reference.
// 3. The function which released the raw_ptr reference. kLogSignature,
// 4. The task in which the raw_ptr was released.
kLogOnly,
// Note: This will be extended with a single shot DumpWithoutCrashing. // Note: This will be extended with a single shot DumpWithoutCrashing.
}; };
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode> extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam; kDanglingPtrModeParam;
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
// Detect when freeing memory and releasing the dangling raw_ptr happens in #if PA_CONFIG(ALLOW_PCSCAN)
// a different task. Those are more likely to cause use after free.
kCrossTask,
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly); BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
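
Schematically, the feature-plus-enum-param pattern used above (kPartitionAllocDanglingPtr with its "mode" param) looks like the sketch below. The feature, enum, and function names are placeholders for illustration, not additions to this file:

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

// Placeholder feature mirroring the pattern above.
BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);

enum class MyMode { kFast, kSafe };

constexpr base::FeatureParam<MyMode>::Option kMyModeOptions[] = {
    {MyMode::kFast, "fast"},
    {MyMode::kSafe, "safe"},
};
const base::FeatureParam<MyMode> kMyModeParam{&kMyFeature, "mode",
                                              MyMode::kSafe, &kMyModeOptions};

// Selected at runtime with: --enable-features=MyFeature:mode/fast
void Configure() {
  if (!base::FeatureList::IsEnabled(kMyFeature))
    return;
  switch (kMyModeParam.Get()) {
    case MyMode::kFast:
      // ...
      break;
    case MyMode::kSafe:
      // ...
      break;
  }
}
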


@ -48,14 +48,14 @@
#include "build/build_config.h" #include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h" #include "third_party/abseil-cpp/absl/types/optional.h"
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h" #include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h" #include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h" #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#include "base/memory/nonscannable_memory.h" #include "base/memory/nonscannable_memory.h"
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h" #include "base/system/sys_info.h"
@ -74,13 +74,13 @@ namespace {
namespace switches { namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer"; [[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote"; constexpr char kZygoteProcess[] = "zygote";
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
constexpr char kGpuProcess[] = "gpu-process"; constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility"; constexpr char kUtilityProcess[] = "utility";
#endif #endif
} // namespace switches } // namespace switches
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING) #if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString( constexpr const char* ScannerIdToTracingString(
@ -181,11 +181,11 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
static constexpr char kTraceCategory[] = "partition_alloc"; static constexpr char kTraceCategory[] = "partition_alloc";
}; };
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
} // namespace } // namespace
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
void RegisterPCScanStatsReporter() { void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter; static StatsReporterImpl s_reporter;
static bool registered = false; static bool registered = false;
@ -195,7 +195,7 @@ void RegisterPCScanStatsReporter() {
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter); partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true; registered = true;
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
namespace { namespace {
@ -302,7 +302,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
} }
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
[[maybe_unused]] bool pcscan_enabled = [[maybe_unused]] bool pcscan_enabled =
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly); FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else #else
false; false;
@ -378,7 +378,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
// fully controlled by Finch and thus have identical population sizes. // fully controlled by Finch and thus have identical population sizes.
std::string pcscan_group_name = "Unavailable"; std::string pcscan_group_name = "Unavailable";
std::string pcscan_group_name_fallback = "Unavailable"; std::string pcscan_group_name_fallback = "Unavailable";
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
if (brp_truly_enabled) { if (brp_truly_enabled) {
// If BRP protection is enabled, just ignore the population. Check // If BRP protection is enabled, just ignore the population. Check
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes // brp_truly_enabled, not brp_finch_enabled, because there are certain modes
@ -395,7 +395,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
} else { } else {
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled"); pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
trials.emplace("PCScan_Effective", pcscan_group_name); trials.emplace("PCScan_Effective", pcscan_group_name);
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback); trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@ -415,13 +415,11 @@ namespace {
internal::PartitionLock g_stack_trace_buffer_lock; internal::PartitionLock g_stack_trace_buffer_lock;
struct DanglingPointerFreeInfo { struct StackTraceWithID {
debug::StackTrace stack_trace; debug::StackTrace stack_trace;
debug::TaskTrace task_trace;
uintptr_t id = 0; uintptr_t id = 0;
}; };
using DanglingRawPtrBuffer = using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
std::array<absl::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock); DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
void DanglingRawPtrDetected(uintptr_t id) { void DanglingRawPtrDetected(uintptr_t id) {
@ -430,14 +428,14 @@ void DanglingRawPtrDetected(uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock); internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) { for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
PA_DCHECK(!entry || entry->id != id); PA_DCHECK(!entry || entry->id != id);
} }
#endif // DCHECK_IS_ON() #endif // DCHECK_IS_ON()
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) { for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (!entry) { if (!entry) {
entry = {debug::StackTrace(), debug::TaskTrace(), id}; entry = {debug::StackTrace(), id};
return; return;
} }
} }
@ -446,16 +444,15 @@ void DanglingRawPtrDetected(uintptr_t id) {
// enough. // enough.
} }
// From the traces recorded in |DanglingRawPtrDetected|, extract the one // From the StackTrace recorded in |DanglingRawPtrDetected|, extract the one
// whose id match |id|. Return nullopt if not found. // whose id match |id|. Return nullopt if not found.
absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo( absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock); internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) { for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
if (entry && entry->id == id) { if (entry && entry->id == id) {
absl::optional<DanglingPointerFreeInfo> result(entry); debug::StackTrace stack_trace = std::move(entry->stack_trace);
entry = absl::nullopt; entry = absl::nullopt;
return result; return stack_trace;
} }
} }
return absl::nullopt; return absl::nullopt;
@ -466,31 +463,14 @@ absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
// are all the dangling raw_ptr occurrences in a table. // are all the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) { std::string ExtractDanglingPtrSignature(std::string stacktrace) {
std::vector<StringPiece> lines = SplitStringPiece( std::vector<StringPiece> lines = SplitStringPiece(
stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY); stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
// We are looking for the callers of the function releasing the raw_ptr and // We are looking for the callers of the function releasing the raw_ptr and
// freeing memory: // freeing memory:
const StringPiece callees[] = { const StringPiece callees[] = {
// Common signatures
"internal::PartitionFree",
"base::(anonymous namespace)::FreeFn",
// Linux signatures
"internal::RawPtrBackupRefImpl<>::ReleaseInternal()", "internal::RawPtrBackupRefImpl<>::ReleaseInternal()",
"base::RefCountedThreadSafe<>::Release()", "internal::PartitionFree()",
"base::(anonymous namespace)::FreeFn()",
// Windows signatures
"internal::RawPtrBackupRefImpl<0>::ReleaseInternal",
"_free_base",
// Windows stack traces are prefixed with "Backtrace:"
"Backtrace:",
// Mac signatures
"internal::RawPtrBackupRefImpl<false>::ReleaseInternal",
// Task traces are prefixed with "Task trace:" in
// |TaskTrace::OutputToStream|
"Task trace:",
}; };
size_t caller_index = 0; size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) { for (size_t i = 0; i < lines.size(); ++i) {
@ -501,142 +481,78 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
} }
} }
if (caller_index >= lines.size()) { if (caller_index >= lines.size()) {
return "no_callee_match"; return "undefined";
} }
StringPiece caller = lines[caller_index]; StringPiece caller = lines[caller_index];
if (caller.empty()) { // |callers| follows the following format:
return "invalid_format"; //
// #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()
// -- -------------- -----------------------------------------------
// Depth Address Function
size_t address_start = caller.find(' ');
size_t function_start = caller.find(' ', address_start + 1);
if (address_start == caller.npos || function_start == caller.npos) {
return "undefined";
} }
// On Posix platforms |callers| follows the following format: return std::string(caller.substr(function_start + 1));
//
// #<index> <address> <symbol>
//
// See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] == '#') {
const size_t address_start = caller.find(' ');
const size_t function_start = caller.find(' ', address_start + 1);
if (address_start == caller.npos || function_start == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(function_start + 1));
}
// On Windows platforms |callers| follows the following format:
//
// \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
//
// See https://crsrc.org/c/base/debug/stack_trace_win.cc
if (caller[0] == '\t') {
const size_t symbol_start = 1;
const size_t symbol_end = caller.find(' ');
if (symbol_end == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
}
// On Mac platforms |callers| follows the following format:
//
// <index> <library> 0x<address> <symbol> + <line>
//
// See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] >= '0' && caller[0] <= '9') {
const size_t address_start = caller.find("0x");
const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
const size_t symbol_end = caller.find(' ', symbol_start);
if (symbol_start == caller.npos || symbol_end == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
}
return "invalid_format";
} }
std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) { void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
if (task_trace.empty()) { // This is called from raw_ptr<>'s release operation. Making allocations is
return "No active task"; // allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\t%s",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
} else {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\tmissing-stacktrace",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
} }
return ExtractDanglingPtrSignature(task_trace.ToString());
} }
std::string ExtractDanglingPtrSignature( void DanglingRawPtrReleasedCrash(uintptr_t id) {
absl::optional<DanglingPointerFreeInfo> free_info,
debug::StackTrace release_stack_trace,
debug::TaskTrace release_task_trace) {
if (free_info) {
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s",
ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
template <features::DanglingPtrMode dangling_pointer_mode,
features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is // This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may // allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory. // allocate memory.
debug::StackTrace stack_trace_release; debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release; debug::TaskTrace task_trace_release;
absl::optional<DanglingPointerFreeInfo> free_info = absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
TakeDanglingPointerFreeInfo(id);
if constexpr (dangling_pointer_type ==
features::DanglingPtrType::kCrossTask) {
if (!free_info) {
return;
}
if (task_trace_release.ToString() == free_info->task_trace.ToString()) {
return;
}
}
std::string dangling_signature = ExtractDanglingPtrSignature(
free_info, stack_trace_release, task_trace_release);
static const char dangling_ptr_footer[] = static const char dangling_ptr_footer[] =
"\n" "\n"
"\n" "\n"
"Please check for more information on:\n" "Please check for more information on:\n"
"https://chromium.googlesource.com/chromium/src/+/main/docs/" "https://chromium.googlesource.com/chromium/src/+/main/docs/"
"dangling_ptr_guide.md\n" "dangling_ptr_guide.md\n";
"\n"
"Googlers: Please give us your feedback about the dangling pointer\n" if (stack_trace_free) {
" detector at:\n"
" http://go/dangling-ptr-cq-survey\n";
if (free_info) {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "The memory was freed at:\n" << "The memory was freed at:\n"
<< free_info->stack_trace << free_info->task_trace << "\n" << *stack_trace_free << "\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release << stack_trace_release << task_trace_release
<< dangling_ptr_footer; << dangling_ptr_footer;
} else { } else {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n" << "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release << stack_trace_release << task_trace_release
<< dangling_ptr_footer; << dangling_ptr_footer;
} }
ImmediateCrash();
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
ImmediateCrash();
}
} }
void ClearDanglingRawPtrBuffer() { void ClearDanglingRawPtrBuffer() {
@ -657,35 +573,16 @@ void InstallDanglingRawPtrChecks() {
return; return;
} }
partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
switch (features::kDanglingPtrModeParam.Get()) { switch (features::kDanglingPtrModeParam.Get()) {
case features::DanglingPtrMode::kCrash: case features::DanglingPtrMode::kCrash:
switch (features::kDanglingPtrTypeParam.Get()) { partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
case features::DanglingPtrType::kAll: partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kCrossTask>);
break;
}
break; break;
case features::DanglingPtrMode::kLogOnly:
switch (features::kDanglingPtrTypeParam.Get()) { case features::DanglingPtrMode::kLogSignature:
case features::DanglingPtrType::kAll: partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn( partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly, DanglingRawPtrReleasedLogSignature);
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
features::DanglingPtrType::kCrossTask>);
break;
}
break; break;
} }
} }
@ -735,7 +632,7 @@ void InstallUnretainedDanglingRawPtrChecks() {
namespace { namespace {
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
void SetProcessNameForPCScan(const std::string& process_type) { void SetProcessNameForPCScan(const std::string& process_type) {
const char* name = [&process_type] { const char* name = [&process_type] {
if (process_type.empty()) { if (process_type.empty()) {
@ -816,7 +713,7 @@ bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false; return false;
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
} // namespace } // namespace
@ -1032,7 +929,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// If BRP is not enabled, check if any of PCScan flags is enabled. // If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false; [[maybe_unused]] bool scan_enabled = false;
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
if (!enable_brp) { if (!enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded(); scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process. // No specified process type means this is the Browser process.
@ -1066,10 +963,10 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
SetProcessNameForPCScan(process_type); SetProcessNameForPCScan(process_type);
} }
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
// Non-quarantinable partition is dealing with hot V8's zone allocations. // Non-quarantinable partition is dealing with hot V8's zone allocations.
// In case PCScan is enabled in Renderer, enable thread cache on this // In case PCScan is enabled in Renderer, enable thread cache on this
// partition. At the same time, thread cache on the main(malloc) partition // partition. At the same time, thread cache on the main(malloc) partition
@ -1079,7 +976,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
.root() .root()
->EnableThreadCacheIfSupported(); ->EnableThreadCacheIfSupported();
} else } else
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
{ {
allocator_shim::internal::PartitionAllocMalloc::Allocator() allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported(); ->EnableThreadCacheIfSupported();
@ -1161,7 +1058,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) && #endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
if (base::FeatureList::IsEnabled( if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanMUAwareScheduler)) { base::features::kPartitionAllocPCScanMUAwareScheduler)) {
// Assign PCScan a task-based scheduling backend. // Assign PCScan a task-based scheduling backend.
@ -1173,7 +1070,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend( partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
*mu_aware_task_based_backend.get()); *mu_aware_task_based_backend.get());
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::allocator::StartMemoryReclaimer( base::allocator::StartMemoryReclaimer(
@ -1239,11 +1136,4 @@ void PartitionAllocSupport::OnBackgrounded() {
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
} }
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
std::string stacktrace) {
return ExtractDanglingPtrSignature(stacktrace);
}
#endif
} // namespace base::allocator } // namespace base::allocator
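
The per-platform frame formats documented above reduce, on Posix, to "#<index> <address> <symbol>". A standalone sketch of that branch using only the standard library (not the Chromium helper itself):

#include <iostream>
#include <string>

// Extracts the symbol from a Posix-style stack frame line such as
// "#4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()".
std::string ExtractSymbolFromPosixFrame(const std::string& line) {
  if (line.empty() || line[0] != '#')
    return "invalid_format";
  const size_t address_start = line.find(' ');
  const size_t function_start = line.find(' ', address_start + 1);
  if (address_start == std::string::npos ||
      function_start == std::string::npos) {
    return "invalid_format";
  }
  return line.substr(function_start + 1);
}

int main() {
  std::cout << ExtractSymbolFromPosixFrame(
                   "#4 0x56051fe3404b "
                   "content::GeneratedCodeCache::DidCreateBackend()")
            << "\n";  // prints the function name
  return 0;
}
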


@ -19,7 +19,7 @@
namespace base::allocator { namespace base::allocator {
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter(); BASE_EXPORT void RegisterPCScanStatsReporter();
#endif #endif
@ -75,11 +75,6 @@ class BASE_EXPORT PartitionAllocSupport {
void OnForegrounded(bool has_main_frame); void OnForegrounded(bool has_main_frame);
void OnBackgrounded(); void OnBackgrounded();
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static std::string ExtractDanglingPtrSignatureForTests(
std::string stacktrace);
#endif
static PartitionAllocSupport* Get() { static PartitionAllocSupport* Get() {
static auto* singleton = new PartitionAllocSupport(); static auto* singleton = new PartitionAllocSupport();
return singleton; return singleton;


@ -289,20 +289,19 @@ component("partition_alloc") {
} }
if (use_starscan) { if (use_starscan) {
if (current_cpu == "x64") { if (current_cpu == "x64") {
assert(pcscan_stack_supported) defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") { } else if (current_cpu == "x86") {
assert(pcscan_stack_supported) defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
} else if (current_cpu == "arm") { } else if (current_cpu == "arm") {
assert(pcscan_stack_supported) defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") { } else if (current_cpu == "arm64") {
assert(pcscan_stack_supported) defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ] sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
} else { } else {
# To support a trampoline for another arch, please refer to v8/src/heap/base. # To support a trampoline for another arch, please refer to v8/src/heap/base.
assert(!pcscan_stack_supported)
} }
} }
public_deps = [ public_deps = [
@ -398,12 +397,6 @@ source_set("raw_ptr") {
# See also: `partition_alloc_base/component_export.h` # See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ] defines = [ "IS_RAW_PTR_IMPL" ]
# When built inside Chromium, although this cannot directly be made a
# component, we expect `//base` to provide the only GN-level access.
if (build_with_chromium) {
visibility = [ "//base" ]
}
} }
buildflag_header("partition_alloc_buildflags") { buildflag_header("partition_alloc_buildflags") {
@ -422,15 +415,12 @@ buildflag_header("partition_alloc_buildflags") {
# defines and partition alloc includes the header file. For chrome, # defines and partition alloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes. # gen/base/allocator/buildflags.h defines and chrome includes.
flags = [ flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"USE_PARTITION_ALLOC=$use_partition_alloc", "USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc", "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support", "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks", "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks", "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment", "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr", "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot", "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
@ -452,17 +442,9 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata", "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_STARSCAN=$use_starscan", "USE_STARSCAN=$use_starscan",
"PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
"ENABLE_PKEYS=$enable_pkeys", "ENABLE_PKEYS=$enable_pkeys",
] ]
if (is_apple) {
# TODO(crbug.com/1414153): once TimeTicks::Now behavior is unified on iOS,
# this should be removed.
flags += [ "PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS=" +
"$partition_alloc_enable_mach_absolute_time_ticks" ]
}
} }
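
Each entry in the flags list above becomes a macro in the generated partition_alloc_buildflags.h header and is consumed through BUILDFLAG(). A minimal consumer sketch, matching how USE_STARSCAN is tested elsewhere in this diff:

#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"

#if BUILDFLAG(USE_STARSCAN)
void MaybeInitStarScan();           // real initialization lives behind the flag
#else
inline void MaybeInitStarScan() {}  // no-op when the feature is compiled out
#endif  // BUILDFLAG(USE_STARSCAN)
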
buildflag_header("chromecast_buildflags") { buildflag_header("chromecast_buildflags") {


@ -158,9 +158,5 @@ specific_include_rules = {
"+base", "+base",
"+third_party/abseil-cpp/absl/types/optional.h", "+third_party/abseil-cpp/absl/types/optional.h",
"+third_party/abseil-cpp/absl/types/variant.h", "+third_party/abseil-cpp/absl/types/variant.h",
],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
] ]
} }


@ -34,7 +34,7 @@ AddressPoolManager& AddressPoolManager::GetInstance() {
return singleton_; return singleton_;
} }
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
namespace { namespace {
@ -77,9 +77,8 @@ uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
} }
void AddressPoolManager::ResetForTesting() { void AddressPoolManager::ResetForTesting() {
for (size_t i = 0; i < std::size(aligned_pools_.pools_); ++i) { for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i)
aligned_pools_.pools_[i].Reset(); aligned_pools_.pools_[i].Reset();
}
} }
void AddressPoolManager::Remove(pool_handle handle) { void AddressPoolManager::Remove(pool_handle handle) {
@ -103,7 +102,7 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle, void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address, uintptr_t address,
size_t length) { size_t length) {
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools); PA_DCHECK(0 < handle && handle <= kNumPools);
Pool* pool = GetPool(handle); Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized()); PA_DCHECK(pool->IsInitialized());
DecommitPages(address, length); DecommitPages(address, length);
@ -300,7 +299,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true; return true;
} }
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_CONFIG(HAS_64_BITS_POINTERS)
static_assert( static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
@ -532,7 +531,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true; return true;
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) { void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{}; AddressSpaceStats stats{};


@ -15,8 +15,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h" #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -53,7 +53,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
AddressPoolManager(const AddressPoolManager&) = delete; AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete; AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length); void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle); void Remove(pool_handle handle);
@ -63,7 +63,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// Return the base address of a pool. // Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle); uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
// Reserves address space from the pool. // Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle, uintptr_t Reserve(pool_handle handle,
@ -76,7 +76,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
size_t length); size_t length);
void ResetForTesting(); void ResetForTesting();
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size); void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size); void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
@ -87,7 +87,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
static bool IsManagedByBRPPool(uintptr_t address) { static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address); return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
} }
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper); void DumpStats(AddressSpaceStatsDumper* dumper);
@ -107,7 +107,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// if PartitionAlloc is wholly unused in this process.) // if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats); bool GetStats(AddressSpaceStats* stats);
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
class Pool { class Pool {
public: public:
constexpr Pool() = default; constexpr Pool() = default;
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
}; };
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) { PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools); PA_DCHECK(0 < handle && handle <= kNumPools);
return &aligned_pools_.pools_[handle - 1]; return &aligned_pools_.pools_[handle - 1];
} }
@ -168,7 +168,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {}; char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {};
} aligned_pools_ PA_PKEY_ALIGN; } aligned_pools_ PA_PKEY_ALIGN;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_; static PA_CONSTINIT AddressPoolManager singleton_;
}; };


@ -7,7 +7,7 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -34,4 +34,4 @@ std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_CONFIG(HAS_64_BITS_POINTERS)


@ -14,11 +14,12 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h" #include "build/build_config.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
namespace partition_alloc { namespace partition_alloc {
@ -184,6 +185,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc } // namespace partition_alloc
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_


@ -7,7 +7,7 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
enum pool_handle : unsigned; using pool_handle = unsigned;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
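
The change above replaces the plain alias `using pool_handle = unsigned` with `enum pool_handle : unsigned`, so arbitrary integers no longer convert implicitly into pool handles. A small self-contained illustration of the difference:

#include <cstdio>

enum pool_handle : unsigned {};  // strongly typed handle, as in the new header

void Remove(pool_handle handle) {
  std::printf("removing pool %u\n", static_cast<unsigned>(handle));
}

int main() {
  // Remove(2);  // no longer compiles: int does not implicitly convert to pool_handle
  Remove(static_cast<pool_handle>(2));  // the conversion must now be spelled out
  return 0;
}
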


@ -4,8 +4,8 @@
#include "base/allocator/partition_allocator/address_space_randomization.h" #include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/random.h" #include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -18,7 +18,7 @@ namespace partition_alloc {
uintptr_t GetRandomPageBase() { uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue()); uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
random <<= 32ULL; random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue()); random |= static_cast<uintptr_t>(internal::RandomValue());
@ -26,7 +26,7 @@ uintptr_t GetRandomPageBase() {
// OS and build configuration. // OS and build configuration.
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes // On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the // excessive fragmentation. Plus most of these systems lack ASLR, so the
@ -40,7 +40,7 @@ uintptr_t GetRandomPageBase() {
#endif // BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random; return random;


@ -9,6 +9,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
namespace partition_alloc { namespace partition_alloc {
@ -19,9 +20,9 @@ struct PoolStats {
// On 32-bit, pools are mainly logical entities, intermingled with // On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available // allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case. // reservation" is not possible to measure in that case.
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
size_t largest_available_reservation = 0; size_t largest_available_reservation = 0;
#endif #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
}; };
struct AddressSpaceStats { struct AddressSpaceStats {
@ -29,14 +30,14 @@ struct AddressSpaceStats {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PoolStats brp_pool_stats; PoolStats brp_pool_stats;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
PoolStats configurable_pool_stats; PoolStats configurable_pool_stats;
#else #else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t blocklist_size; // measured in super pages size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count; size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
PoolStats pkey_pool_stats; PoolStats pkey_pool_stats;
#endif #endif

View File

@ -14,6 +14,6 @@ use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = true put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false enable_dangling_raw_ptr_checks_default = false

View File

@ -6,9 +6,20 @@ standalone repository for PartitionAlloc is hosted
## GN Args ## GN Args
External clients should examine the args described in
`build_overrides/partition_alloc.gni` and add them in their own source
tree. PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`.
External clients mainly need to set these six GN args:
``` none
# These are blocked on PA-E and `raw_ptr.h` and can never be true until
# we make them part of the standalone PA distribution.
use_partition_alloc_as_malloc_default = false
enable_mte_checked_ptr_support_default = false
enable_backup_ref_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
```
PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`.
In addition, something must provide `build_with_chromium = false` to In addition, something must provide `build_with_chromium = false` to

View File

@ -136,18 +136,8 @@ bool UseMapJit() {
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) == return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue; kCFBooleanTrue;
} }
#elif BUILDFLAG(IS_IOS) #endif // BUILDFLAG(IS_MAC)
bool UseMapJit() {
// Always enable MAP_JIT in simulator as it is supported unconditionally.
#if TARGET_IPHONE_SIMULATOR
return true;
#else
// TODO(https://crbug.com/1413818): Fill this out when the API is
// available.
return false;
#endif // TARGET_IPHONE_SIMULATOR
}
#endif // BUILDFLAG(IS_IOS)
} // namespace } // namespace
// |mmap| uses a nearby address if the hint address is blocked. // |mmap| uses a nearby address if the hint address is blocked.
@ -176,7 +166,7 @@ uintptr_t SystemAllocPagesInternal(uintptr_t hint,
int access_flag = GetAccessFlags(accessibility); int access_flag = GetAccessFlags(accessibility);
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE; int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_MAC)
// On macOS 10.14 and higher, executables that are code signed with the // On macOS 10.14 and higher, executables that are code signed with the
// "runtime" option cannot execute writable memory by default. They can opt // "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit" // into this capability by specifying the "com.apple.security.cs.allow-jit"
@ -379,6 +369,7 @@ bool TryRecommitSystemPagesInternal(
} }
void DiscardSystemPagesInternal(uintptr_t address, size_t length) { void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
#if !BUILDFLAG(IS_NACL)
void* ptr = reinterpret_cast<void*>(address); void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
int ret = madvise(ptr, length, MADV_FREE_REUSABLE); int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
@ -387,7 +378,7 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
ret = madvise(ptr, length, MADV_DONTNEED); ret = madvise(ptr, length, MADV_DONTNEED);
} }
PA_PCHECK(ret == 0); PA_PCHECK(ret == 0);
#else // BUILDFLAG(IS_APPLE) #else
// We have experimented with other flags, but with suboptimal results. // We have experimented with other flags, but with suboptimal results.
// //
// MADV_FREE (Linux): Makes our memory measurements less predictable; // MADV_FREE (Linux): Makes our memory measurements less predictable;
@ -400,7 +391,8 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
return; return;
} }
PA_PCHECK(ret == 0); PA_PCHECK(ret == 0);
#endif // BUILDFLAG(IS_APPLE) #endif
#endif // !BUILDFLAG(IS_NACL)
} }
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
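A simplified sketch of the discard strategy described in the comments above: prefer `MADV_FREE_REUSABLE` on Apple platforms and fall back to `MADV_DONTNEED` when it fails. Illustrative only, not the actual `DiscardSystemPagesInternal`:
```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Simplified sketch: on Apple platforms, try MADV_FREE_REUSABLE first and
// fall back to MADV_DONTNEED when the call fails; elsewhere, use
// MADV_DONTNEED directly.
void DiscardPagesSketch(void* ptr, size_t length) {
#if defined(__APPLE__)
  int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
  if (ret) {
    ret = madvise(ptr, length, MADV_DONTNEED);
  }
#else
  int ret = madvise(ptr, length, MADV_DONTNEED);
#endif
  assert(ret == 0);
}
```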

View File

@ -10,8 +10,8 @@
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h" #include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h" #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -28,7 +28,7 @@ bool IsOutOfMemory(DWORD error) {
case ERROR_COMMITMENT_MINIMUM: case ERROR_COMMITMENT_MINIMUM:
// Page file is too small. // Page file is too small.
case ERROR_COMMITMENT_LIMIT: case ERROR_COMMITMENT_LIMIT:
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// Not enough memory resources are available to process this command. // Not enough memory resources are available to process this command.
// //
// It is not entirely clear whether this error pertains to out of address // It is not entirely clear whether this error pertains to out of address

View File

@ -16,7 +16,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -37,7 +36,7 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
namespace { namespace {
@ -423,6 +422,6 @@ PageCharacteristics page_characteristics;
#endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64) #endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -26,7 +26,7 @@
#include "build/build_config.h" #include "build/build_config.h"
// The feature is not applicable to 32-bit address space. // The feature is not applicable to 32-bit address space.
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
namespace partition_alloc { namespace partition_alloc {
@ -52,7 +52,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!IsInBRPPool(address)); PA_DCHECK(!IsInBRPPool(address));
#endif #endif
pool_handle pool = kNullPoolHandle; pool_handle pool = 0;
uintptr_t base = 0; uintptr_t base = 0;
if (IsInRegularPool(address)) { if (IsInRegularPool(address)) {
pool = kRegularPoolHandle; pool = kRegularPoolHandle;
@ -475,6 +475,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc } // namespace partition_alloc
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

View File

@ -14,6 +14,7 @@
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h" #include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
@ -104,6 +105,16 @@ void PartitionAllocGlobalUninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
internal::PartitionAddressSpace::UninitPkeyPoolForTesting(); internal::PartitionAddressSpace::UninitPkeyPoolForTesting();
#endif #endif
#if BUILDFLAG(USE_STARSCAN)
internal::PCScan::UninitForTesting(); // IN-TEST
#endif
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(HAS_64_BITS_POINTERS)
internal::PartitionAddressSpace::UninitForTesting();
#else
internal::AddressPoolManager::GetInstance().ResetForTesting();
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
internal::g_oom_handling_function = nullptr; internal::g_oom_handling_function = nullptr;
} }

View File

@ -5,23 +5,6 @@
import("//build/config/sanitizers/sanitizers.gni") import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni") import("//build_overrides/partition_alloc.gni")
if (is_apple) {
import("//build/config/features.gni")
}
# Whether 64-bit pointers are used.
# A static_assert in partition_alloc_config.h verifies that.
if (is_nacl) {
# NaCl targets don't use 64-bit pointers.
has_64_bit_pointers = false
} else if (current_cpu == "x64" || current_cpu == "arm64" || current_cpu == "mips64el") {
has_64_bit_pointers = true
} else if (current_cpu == "x86" || current_cpu == "arm" || current_cpu == "mipsel") {
has_64_bit_pointers = false
} else {
assert(false, "Unknown CPU: $current_cpu")
}
if (use_partition_alloc_as_malloc_default) { if (use_partition_alloc_as_malloc_default) {
_default_allocator = "partition" _default_allocator = "partition"
} else { } else {
@ -136,14 +119,6 @@ declare_args() {
enable_dangling_raw_ptr_checks = enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enable the feature flags required to check for dangling pointers. That is to
# say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
#
# This is meant to be used on bots only. It is much easier to override the
# feature flags using a binary flag instead of updating multiple bots's
# scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flags_for_bots = false
# Enables the dangling raw_ptr checks feature for the performance experiment. # Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointer has been fixed or annotated yet. To avoid # accounting for the cost of calling the PA's embedder's callbacks when a
# accounting for the cost of calling the PA's embedder's callbacks when a # accounting for the cost of calling the PA's embedder's callbacks when a
@ -161,20 +136,10 @@ declare_args() {
# Shadow metadata is still under development and only supports Linux # Shadow metadata is still under development and only supports Linux
# for now. # for now.
enable_shadow_metadata = false enable_shadow_metadata = false
if (is_apple) {
# use_blink currently assumes mach absolute ticks (eg, to ensure trace
# events cohere).
partition_alloc_enable_mach_absolute_time_ticks = is_mac || use_blink
}
} }
# *Scan is currently only used by Chromium, and supports only 64-bit. # *Scan is currently only used by Chromium.
use_starscan = build_with_chromium && has_64_bit_pointers use_starscan = build_with_chromium
pcscan_stack_supported =
use_starscan && (current_cpu == "x64" || current_cpu == "x86" ||
current_cpu == "arm" || current_cpu == "arm64")
# We want to provide assertions that guard against inconsistent build # We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building # args, but there is no point in having them fire if we're not building
@ -258,14 +223,6 @@ assert(!use_asan_backup_ref_ptr || is_asan,
assert(!use_asan_unowned_ptr || is_asan, assert(!use_asan_unowned_ptr || is_asan,
"AsanUnownedPtr requires AddressSanitizer") "AsanUnownedPtr requires AddressSanitizer")
if (is_apple) {
assert(!use_blink || partition_alloc_enable_mach_absolute_time_ticks,
"use_blink requires partition_alloc_enable_mach_absolute_time_ticks")
assert(!is_mac || partition_alloc_enable_mach_absolute_time_ticks,
"mac requires partition_alloc_enable_mach_absolute_time_ticks")
}
# AsanBackupRefPtr is not supported outside Chromium. The implementation is # AsanBackupRefPtr is not supported outside Chromium. The implementation is
# entangled with `//base`. The code is only physically located with the # entangled with `//base`. The code is only physically located with the
# rest of `raw_ptr` to keep it together. # rest of `raw_ptr` to keep it together.

View File

@ -137,7 +137,7 @@
#endif #endif
// MemorySanitizer annotations. // MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) #if defined(MEMORY_SANITIZER) && !BUILDFLAG(IS_NACL)
#include <sanitizer/msan_interface.h> #include <sanitizer/msan_interface.h>
// Mark a memory region fully initialized. // Mark a memory region fully initialized.

View File

@ -41,7 +41,13 @@
#if defined(COMPILER_GCC) #if defined(COMPILER_GCC)
#if defined(ARCH_CPU_X86_FAMILY) #if BUILDFLAG(IS_NACL)
// Crash report accuracy is not guaranteed on NaCl.
#define PA_TRAP_SEQUENCE1_() __builtin_trap()
#define PA_TRAP_SEQUENCE2_() asm volatile("")
#elif defined(ARCH_CPU_X86_FAMILY)
// TODO(https://crbug.com/958675): In theory, it should be possible to use just // TODO(https://crbug.com/958675): In theory, it should be possible to use just
// int3. However, there are a number of crashes with SIGILL as the exception // int3. However, there are a number of crashes with SIGILL as the exception

View File

@ -12,7 +12,7 @@
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {
#if defined(__GLIBC__) #if defined(__GLIBC__) || BUILDFLAG(IS_NACL)
#define USE_HISTORICAL_STRERROR_R 1 #define USE_HISTORICAL_STRERROR_R 1
// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE // Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
// is defined, but the symbol is renamed to __gnu_strerror_r which only exists // is defined, but the symbol is renamed to __gnu_strerror_r which only exists

View File

@ -18,7 +18,7 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) #if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
#include "third_party/lss/linux_syscall_support.h" #include "third_party/lss/linux_syscall_support.h"
#elif BUILDFLAG(IS_MAC) #elif BUILDFLAG(IS_MAC)
// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK. // TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
@ -68,7 +68,7 @@ namespace partition_alloc::internal::base {
// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land // (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
// it or some form of it. // it or some form of it.
void RandBytes(void* output, size_t output_length) { void RandBytes(void* output, size_t output_length) {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) #if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
// We have to call `getrandom` via Linux Syscall Support, rather than through // We have to call `getrandom` via Linux Syscall Support, rather than through
// the libc wrapper, because we might not have an up-to-date libc (e.g. on // the libc wrapper, because we might not have an up-to-date libc (e.g. on
// some bots). // some bots).
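For illustration, the "call `getrandom(2)` directly rather than through the libc wrapper" approach described above can be sketched with the generic `syscall()` entry point; the real code goes through linux_syscall_support instead, so this is only an approximation (Linux-only):
```cpp
#include <sys/syscall.h>
#include <unistd.h>
#include <cerrno>
#include <cstddef>
#include <cstdio>

// Fill `output` with `output_length` random bytes by invoking the
// getrandom(2) system call directly, bypassing any libc wrapper.
bool RandBytesSketch(void* output, size_t output_length) {
  char* out = static_cast<char*>(output);
  while (output_length > 0) {
    long produced = syscall(SYS_getrandom, out, output_length, 0 /* flags */);
    if (produced < 0) {
      if (errno == EINTR) {
        continue;  // Interrupted by a signal; retry.
      }
      return false;  // e.g. a kernel too old to provide getrandom(2).
    }
    out += produced;
    output_length -= static_cast<size_t>(produced);
  }
  return true;
}

int main() {
  unsigned char buf[16];
  if (RandBytesSketch(buf, sizeof(buf))) {
    for (unsigned char b : buf) std::printf("%02x", b);
    std::printf("\n");
  }
}
```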

View File

@ -116,6 +116,11 @@ PlatformThreadId PlatformThread::CurrentId() {
return zx_thread_self(); return zx_thread_self();
#elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX) #elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX)
return pthread_self(); return pthread_self();
#elif BUILDFLAG(IS_NACL) && defined(__GLIBC__)
return pthread_self();
#elif BUILDFLAG(IS_NACL) && !defined(__GLIBC__)
// Pointers are 32-bits in NaCl.
return reinterpret_cast<int32_t>(pthread_self());
#elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX) #elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX)
return pthread_self(); return pthread_self();
#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX) #elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX)

View File

@ -26,7 +26,7 @@
#include <sys/resource.h> #include <sys/resource.h>
#endif #endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif #endif
@ -52,14 +52,16 @@ void* ThreadFunc(void* params) {
delegate = thread_params->delegate; delegate = thread_params->delegate;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if !BUILDFLAG(IS_NACL)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PCScan::NotifyThreadCreated(GetStackPointer()); PCScan::NotifyThreadCreated(GetStackPointer());
#endif
#endif #endif
} }
delegate->ThreadMain(); delegate->ThreadMain();
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if !BUILDFLAG(IS_NACL) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PCScan::NotifyThreadDestroyed(); PCScan::NotifyThreadDestroyed();
#endif #endif

View File

@ -14,7 +14,7 @@
#include <windows.h> #include <windows.h>
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h" #include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif #endif
@ -62,7 +62,7 @@ DWORD __stdcall ThreadFunc(void* params) {
GetCurrentProcess(), &platform_handle, 0, GetCurrentProcess(), &platform_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS); FALSE, DUPLICATE_SAME_ACCESS);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PCScan::NotifyThreadCreated(GetStackPointer()); PCScan::NotifyThreadCreated(GetStackPointer());
#endif #endif
@ -74,7 +74,7 @@ DWORD __stdcall ThreadFunc(void* params) {
delete thread_params; delete thread_params;
delegate->ThreadMain(); delegate->ThreadMain();
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PCScan::NotifyThreadDestroyed(); PCScan::NotifyThreadDestroyed();
#endif #endif
return 0; return 0;

View File

@ -75,10 +75,6 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_FUCHSIA) #if BUILDFLAG(IS_FUCHSIA)
#include <zircon/types.h> #include <zircon/types.h>
#endif #endif
@ -140,11 +136,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeDelta {
#if BUILDFLAG(IS_FUCHSIA) #if BUILDFLAG(IS_FUCHSIA)
static TimeDelta FromZxDuration(zx_duration_t nanos); static TimeDelta FromZxDuration(zx_duration_t nanos);
#endif #endif
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_MAC)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeDelta FromMachTime(uint64_t mach_time); static TimeDelta FromMachTime(uint64_t mach_time);
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(IS_APPLE)
// Converts an integer value representing TimeDelta to a class. This is used // Converts an integer value representing TimeDelta to a class. This is used
// when deserializing a |TimeDelta| structure, using a value known to be // when deserializing a |TimeDelta| structure, using a value known to be
@ -885,16 +879,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks
static TimeTicks FromQPCValue(LONGLONG qpc_value); static TimeTicks FromQPCValue(LONGLONG qpc_value);
#endif #endif
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_MAC)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time); static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
// Sets the current Mach timebase to `timebase`. Returns the old timebase. // Sets the current Mach timebase to `timebase`. Returns the old timebase.
static mach_timebase_info_data_t SetMachTimebaseInfoForTesting( static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
mach_timebase_info_data_t timebase); mach_timebase_info_data_t timebase);
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
// Converts to TimeTicks the value obtained from SystemClock.uptimeMillis(). // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
@ -987,7 +979,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadTicks
// Returns true if ThreadTicks::Now() is supported on this system. // Returns true if ThreadTicks::Now() is supported on this system.
[[nodiscard]] static bool IsSupported() { [[nodiscard]] static bool IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \ #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
return true; return true;
#elif BUILDFLAG(IS_WIN) #elif BUILDFLAG(IS_WIN)
return IsSupportedWin(); return IsSupportedWin();

View File

@ -14,21 +14,18 @@
#include <sys/types.h> #include <sys/types.h>
#include <time.h> #include <time.h>
#if BUILDFLAG(IS_IOS)
#include <errno.h>
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h" #include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h" #include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h" #include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {
namespace { namespace {
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #if BUILDFLAG(IS_MAC)
// Returns a pointer to the initialized Mach timebase info struct. // Returns a pointer to the initialized Mach timebase info struct.
mach_timebase_info_data_t* MachTimebaseInfo() { mach_timebase_info_data_t* MachTimebaseInfo() {
static mach_timebase_info_data_t timebase_info = []() { static mach_timebase_info_data_t timebase_info = []() {
@ -81,32 +78,48 @@ int64_t MachTimeToMicroseconds(uint64_t mach_time) {
// 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277). // 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
return checked_cast<int64_t>(microseconds); return checked_cast<int64_t>(microseconds);
} }
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_MAC)
// Returns monotonically growing number of ticks in microseconds since some // Returns monotonically growing number of ticks in microseconds since some
// unspecified starting point. // unspecified starting point.
int64_t ComputeCurrentTicks() { int64_t ComputeCurrentTicks() {
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #if BUILDFLAG(IS_IOS)
struct timespec tp; // iOS 10 supports clock_gettime(CLOCK_MONOTONIC, ...), which is
// clock_gettime() returns 0 on success and -1 on failure. Failure can only // around 15 times faster than sysctl() call. Use it if possible;
// happen because of bad arguments (unsupported clock type or timespec // otherwise, fall back to sysctl().
// pointer out of accessible address space). Here it is known that neither if (__builtin_available(iOS 10, *)) {
// can happen since the timespec parameter is stack allocated right above and struct timespec tp;
// `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
// supported on. return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
int res = clock_gettime(CLOCK_MONOTONIC, &tp); }
PA_DCHECK(0 == res) << "Failed clock_gettime, errno: " << errno; }
return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000; // On iOS mach_absolute_time stops while the device is sleeping. Instead use
// now - KERN_BOOTTIME to get a time difference that is not impacted by clock
// changes. KERN_BOOTTIME will be updated by the system whenever the system
// clock changes.
struct timeval boottime;
int mib[2] = {CTL_KERN, KERN_BOOTTIME};
size_t size = sizeof(boottime);
int kr = sysctl(mib, std::size(mib), &boottime, &size, nullptr, 0);
PA_DCHECK(KERN_SUCCESS == kr);
TimeDelta time_difference =
subtle::TimeNowIgnoringOverride() -
(Time::FromTimeT(boottime.tv_sec) + Microseconds(boottime.tv_usec));
return time_difference.InMicroseconds();
#else #else
// mach_absolute_time is it when it comes to ticks on the Mac. Other calls // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
// with less precision (such as TickCount) just call through to // with less precision (such as TickCount) just call through to
// mach_absolute_time. // mach_absolute_time.
return MachTimeToMicroseconds(mach_absolute_time()); return MachTimeToMicroseconds(mach_absolute_time());
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_IOS)
} }
int64_t ComputeThreadTicks() { int64_t ComputeThreadTicks() {
#if BUILDFLAG(IS_IOS)
PA_NOTREACHED();
return 0;
#else
// The pthreads library keeps a cached reference to the thread port, which // The pthreads library keeps a cached reference to the thread port, which
// does not have to be released like mach_thread_self() does. // does not have to be released like mach_thread_self() does.
mach_port_t thread_port = pthread_mach_thread_np(pthread_self()); mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
@ -129,6 +142,7 @@ int64_t ComputeThreadTicks() {
absolute_micros += (thread_info_data.user_time.microseconds + absolute_micros += (thread_info_data.user_time.microseconds +
thread_info_data.system_time.microseconds); thread_info_data.system_time.microseconds);
return absolute_micros.ValueOrDie(); return absolute_micros.ValueOrDie();
#endif // BUILDFLAG(IS_IOS)
} }
} // namespace } // namespace
@ -186,12 +200,12 @@ NSDate* Time::ToNSDate() const {
// TimeDelta ------------------------------------------------------------------ // TimeDelta ------------------------------------------------------------------
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #if BUILDFLAG(IS_MAC)
// static // static
TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) { TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
return Microseconds(MachTimeToMicroseconds(mach_time)); return Microseconds(MachTimeToMicroseconds(mach_time));
} }
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_MAC)
// TimeTicks ------------------------------------------------------------------ // TimeTicks ------------------------------------------------------------------
@ -211,7 +225,7 @@ bool TimeTicks::IsConsistentAcrossProcesses() {
return true; return true;
} }
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #if BUILDFLAG(IS_MAC)
// static // static
TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) { TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
return TimeTicks(MachTimeToMicroseconds(mach_absolute_time)); return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
@ -227,15 +241,15 @@ mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
return orig_timebase; return orig_timebase;
} }
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_MAC)
// static // static
TimeTicks::Clock TimeTicks::GetClock() { TimeTicks::Clock TimeTicks::GetClock() {
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #if BUILDFLAG(IS_IOS)
return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME; return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
#else #else
return Clock::MAC_MACH_ABSOLUTE_TIME; return Clock::MAC_MACH_ABSOLUTE_TIME;
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS) #endif // BUILDFLAG(IS_IOS)
} }
// ThreadTicks ---------------------------------------------------------------- // ThreadTicks ----------------------------------------------------------------
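As background for the `IS_MAC` branches above, converting `mach_absolute_time()` ticks to microseconds follows the usual timebase recipe. A minimal sketch; the real `MachTimeToMicroseconds()` additionally uses checked, overflow-aware arithmetic:
```cpp
#include <mach/mach_time.h>
#include <cstdint>
#include <cstdio>

// Ticks are scaled by the timebase fraction (numer/denom) to nanoseconds,
// then divided down to microseconds.
int64_t MachTimeToMicrosecondsSketch(uint64_t mach_time) {
  static mach_timebase_info_data_t timebase = [] {
    mach_timebase_info_data_t info;
    mach_timebase_info(&info);  // e.g. 1/1 on x86_64, other ratios on ARM
    return info;
  }();
  uint64_t nanos = mach_time * timebase.numer / timebase.denom;
  return static_cast<int64_t>(nanos / 1000);
}

int main() {
  std::printf("now: %lld us\n",
              static_cast<long long>(
                  MachTimeToMicrosecondsSketch(mach_absolute_time())));
}
```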

View File

@ -23,25 +23,37 @@
// 4. Do not use PA_CONFIG() when defining config macros, or it will lead to // 4. Do not use PA_CONFIG() when defining config macros, or it will lead to
// recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly. // recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly.
// 5. Try to use constexpr instead of macros wherever possible. // 5. Try to use constexpr instead of macros wherever possible.
// TODO(bartekn): Convert macros to constexpr or BUILDFLAG as much as possible. // TODO(bartekn): Convert macros to constexpr as much as possible.
#define PA_CONFIG(flag) (PA_CONFIG_##flag()) #define PA_CONFIG(flag) (PA_CONFIG_##flag())
// Assert that the heuristic in partition_alloc.gni is accurate on supported // ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
// configurations. // address space. The only known case where address space is 32-bit is NaCl, so
#if BUILDFLAG(HAS_64_BIT_POINTERS) // eliminate it explicitly. static_assert below ensures that others won't slip
// through.
#define PA_CONFIG_HAS_64_BITS_POINTERS() \
(defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL))
#if PA_CONFIG(HAS_64_BITS_POINTERS)
static_assert(sizeof(void*) == 8, ""); static_assert(sizeof(void*) == 8, "");
#else #else
static_assert(sizeof(void*) != 8, ""); static_assert(sizeof(void*) != 8, "");
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && \ // PCScan supports 64 bits only and is disabled outside Chromium.
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#define PA_CONFIG_ALLOW_PCSCAN() 1
#else
#define PA_CONFIG_ALLOW_PCSCAN() 0
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && \
(defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP) (defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1 #define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
#else #else
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0 #define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
#endif #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS)
// Allow PA to select an alternate pool size at run-time before initialization, // Allow PA to select an alternate pool size at run-time before initialization,
// rather than using a single constexpr value. // rather than using a single constexpr value.
// //
@ -52,19 +64,19 @@ static_assert(sizeof(void*) != 8, "");
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1 #define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1
#else #else
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0 #define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS)
// Puts the regular and BRP pools right next to each other, so that we can // Puts the regular and BRP pools right next to each other, so that we can
// check "belongs to one of the two pools" with a single bitmask operation. // check "belongs to one of the two pools" with a single bitmask operation.
// //
// This setting is specific to 64-bit, as 32-bit has a different implementation. // This setting is specific to 64-bit, as 32-bit has a different implementation.
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS)
#define PA_CONFIG_GLUE_CORE_POOLS() 1 #define PA_CONFIG_GLUE_CORE_POOLS() 1
#else #else
#define PA_CONFIG_GLUE_CORE_POOLS() 0 #define PA_CONFIG_GLUE_CORE_POOLS() 0
#endif #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && \ #if PA_CONFIG(HAS_64_BITS_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)) (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#include <linux/version.h> #include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS. // TODO(bikineev): Enable for ChromeOS.
@ -72,10 +84,10 @@ static_assert(sizeof(void*) != 8, "");
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)) (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
#else #else
#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0 #define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) && #endif // PA_CONFIG(HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)) // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
// Use card table to avoid races for PCScan configuration without safepoints. // Use card table to avoid races for PCScan configuration without safepoints.
// The card table provides the guarantee that for a marked card the underlying // super-page is fully initialized.
// super-page is fully initialized. // super-page is fully initialized.
@ -83,7 +95,11 @@ static_assert(sizeof(void*) != 8, "");
#else #else
// The card table is permanently disabled for 32-bit. // The card table is permanently disabled for 32-bit.
#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0 #define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE) && !PA_CONFIG(ALLOW_PCSCAN)
#error "Card table can only be used when *Scan is allowed"
#endif
// Use batched freeing when sweeping pages. This builds up a freelist in the // Use batched freeing when sweeping pages. This builds up a freelist in the
// scanner thread and appends to the slot-span's freelist only once. // scanner thread and appends to the slot-span's freelist only once.
@ -168,7 +184,7 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8); static_assert(sizeof(void*) == 8);
#endif #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#define PA_CONFIG_USE_OOB_POISON() 1 #define PA_CONFIG_USE_OOB_POISON() 1
#else #else
#define PA_CONFIG_USE_OOB_POISON() 0 #define PA_CONFIG_USE_OOB_POISON() 0
@ -179,7 +195,7 @@ static_assert(sizeof(void*) == 8);
// Only applicable to code with 64-bit pointers. Currently conflicts with true // Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE. // hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \ #if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING) PA_CONFIG(HAS_64_BITS_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8); static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1 #define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else #else
@ -305,7 +321,7 @@ constexpr bool kUseLazyCommit = false;
// This feature is only enabled in a 64-bit environment because pools work // This feature is only enabled in a 64-bit environment because pools work
// differently with 32-bit pointers (see glossary). // differently with 32-bit pointers (see glossary).
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \ #if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
BUILDFLAG(HAS_64_BIT_POINTERS) PA_CONFIG(HAS_64_BITS_POINTERS)
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 1 #define PA_CONFIG_ENABLE_SHADOW_METADATA() 1
#else #else
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 0 #define PA_CONFIG_ENABLE_SHADOW_METADATA() 0
@ -324,7 +340,7 @@ constexpr bool kUseLazyCommit = false;
// Enables compressed (4-byte) pointers that can point within the core pools // Enables compressed (4-byte) pointers that can point within the core pools
// (Regular + BRP). // (Regular + BRP).
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 1 #define PA_CONFIG_POINTER_COMPRESSION() 1
#if !PA_CONFIG(GLUE_CORE_POOLS) #if !PA_CONFIG(GLUE_CORE_POOLS)
@ -338,7 +354,7 @@ constexpr bool kUseLazyCommit = false;
// TODO(1376980): Address MTE once it's enabled. // TODO(1376980): Address MTE once it's enabled.
#error "Compressed pointers don't support tag in the upper bits" #error "Compressed pointers don't support tag in the upper bits"
#endif #endif
#else // BUILDFLAG(HAS_64_BIT_POINTERS) && #else // PA_CONFIG(HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_POINTER_COMPRESSION) // BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 0 #define PA_CONFIG_POINTER_COMPRESSION() 0
#endif #endif

View File

@ -13,7 +13,6 @@
#include "base/allocator/partition_allocator/address_pool_manager_types.h" #include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -261,29 +260,12 @@ constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask; constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// PartitionAlloc's address space is split into pools. See `glossary.md`. // PartitionAlloc's address space is split into pools. See `glossary.md`.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
enum pool_handle : unsigned {
kNullPoolHandle = 0u,
kRegularPoolHandle,
kBRPPoolHandle,
#if BUILDFLAG(HAS_64_BIT_POINTERS)
kConfigurablePoolHandle,
#endif
// New pool_handles will be added here.
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
// The pkey pool must come last since we pkey_mprotect its entry in the constexpr size_t kNumPools = 4;
// metadata tables, e.g. AddressPoolManager::aligned_pools_ #else
kPkeyPoolHandle, constexpr size_t kNumPools = 3;
#endif #endif
kMaxPoolHandle
};
// kNullPoolHandle doesn't have metadata, hence - 1
constexpr size_t kNumPools = kMaxPoolHandle - 1;
// Maximum pool size. With the exception of the Configurable Pool, it is also // the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which
// the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which // allows choosing a different size at initialization time for certain
// allows choosing a different size at initialization time for certain // configurations.
@ -295,18 +277,22 @@ constexpr size_t kNumPools = kMaxPoolHandle - 1;
// //
// When pointer compression is enabled, we cannot use large pools (at most // When pointer compression is enabled, we cannot use large pools (at most
// 8GB for each of the glued pools). // 8GB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION)
constexpr size_t kPoolMaxSize = 8 * kGiB; constexpr size_t kPoolMaxSize = 8 * kGiB;
#else #else
constexpr size_t kPoolMaxSize = 16 * kGiB; constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif #endif
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_CONFIG(HAS_64_BITS_POINTERS)
constexpr size_t kNumPools = 2;
constexpr size_t kPoolMaxSize = 4 * kGiB; constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif #endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize; constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
static constexpr pool_handle kPkeyPoolHandle = 4;
static_assert( static_assert(
kPkeyPoolHandle == kNumPools, kPkeyPoolHandle == kNumPools,
"The pkey pool must come last since we pkey_mprotect its metadata."); "The pkey pool must come last since we pkey_mprotect its metadata.");
@ -341,7 +327,7 @@ constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool; return kMaxSuperPagesInPool;
} }
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size, // In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools. // because this is the reservation granularity of the pools.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
@ -351,7 +337,7 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift; return kSuperPageShift;
} }
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_CONFIG(HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system // allocation granularity, which is the lowest possible address space allocation
// allocation granularity, which is the lowest possible address space allocation // allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps // unit. However, don't go below partition page size, so that pool bitmaps
@ -365,7 +351,7 @@ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() { DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift()); return std::max(PageAllocationGranularityShift(), PartitionPageShift());
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() { DirectMapAllocationGranularityOffsetMask() {
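To illustrate how the small consecutive pool handles defined in this header (with 0 reserved as the null handle) pair with `kNumPools`-sized metadata tables, here is a hypothetical sketch; `PoolMetadata` and the table are invented for illustration and are not PartitionAlloc types:
```cpp
#include <array>
#include <cstddef>
#include <cstdint>

// Handles are consecutive small integers starting at 1; 0 is the null handle.
using pool_handle = unsigned;

inline constexpr pool_handle kNullPoolHandle = 0;
inline constexpr pool_handle kRegularPoolHandle = 1;
inline constexpr pool_handle kBRPPoolHandle = 2;
inline constexpr pool_handle kConfigurablePoolHandle = 3;
inline constexpr size_t kNumPools = 3;

struct PoolMetadata {
  uintptr_t base = 0;
  size_t size = 0;
};

// The null handle carries no metadata, hence the "- 1" indexing into a
// table with exactly kNumPools entries.
std::array<PoolMetadata, kNumPools> g_pool_metadata;

PoolMetadata& MetadataFor(pool_handle handle) {
  return g_pool_metadata[handle - 1];
}
```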

View File

@ -1,50 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#include "base/allocator/partition_allocator/partition_alloc.h"
namespace partition_alloc {
namespace internal {
constexpr bool AllowLeaks = true;
constexpr bool DisallowLeaks = false;
// A subclass of PartitionAllocator for testing. It will free all resources,
// i.e. allocated memory, memory inside freelist, and so on, when destructing
// it or when manually invoking reset().
// If you need to check whether any memory was allocated but not freed yet,
// use allow_leaks=false. We will see a CHECK failure inside reset() if any
// leak is detected. Otherwise (e.g. intentional leaks), use allow_leaks=true.
template <bool thread_safe, bool allow_leaks>
struct PartitionAllocatorForTesting : public PartitionAllocator<thread_safe> {
PartitionAllocatorForTesting() : PartitionAllocator<thread_safe>() {}
explicit PartitionAllocatorForTesting(PartitionOptions opts)
: PartitionAllocator<thread_safe>() {
PartitionAllocator<thread_safe>::init(opts);
}
~PartitionAllocatorForTesting() { reset(); }
PA_ALWAYS_INLINE void reset() {
PartitionAllocator<thread_safe>::root()->ResetForTesting(allow_leaks);
}
};
} // namespace internal
using PartitionAllocatorForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::DisallowLeaks>;
using PartitionAllocatorAllowLeaksForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::AllowLeaks>;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_

View File

@ -32,8 +32,6 @@ std::atomic<PartitionAllocHooks::FreeOverrideHook*>
PartitionAllocHooks::free_override_hook_(nullptr); PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*> std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
PartitionAllocHooks::realloc_override_hook_(nullptr); PartitionAllocHooks::realloc_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::QuarantineOverrideHook*>
PartitionAllocHooks::quarantine_override_hook_(nullptr);
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook, void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook) { FreeObserverHook* free_hook) {
@ -120,9 +118,4 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
return false; return false;
} }
void PartitionAllocHooks::SetQuarantineOverrideHook(
QuarantineOverrideHook* hook) {
quarantine_override_hook_.store(hook, std::memory_order_release);
}
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -8,7 +8,6 @@
#include <atomic> #include <atomic>
#include <cstddef> #include <cstddef>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc { namespace partition_alloc {
@ -35,12 +34,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
// the size of the underlying allocation. // the size of the underlying allocation.
typedef bool ReallocOverrideHook(size_t* out, void* address); typedef bool ReallocOverrideHook(size_t* out, void* address);
// Special hook type, independent of the rest. Triggered when `free()` detects
// outstanding references to the allocation.
// IMPORTANT: Make sure the hook always overwrites `[address, address + size)`
// with a bit pattern that cannot be interpreted as a valid memory address.
typedef void QuarantineOverrideHook(void* address, size_t size);
// To unhook, call Set*Hooks with nullptrs. // To unhook, call Set*Hooks with nullptrs.
static void SetObserverHooks(AllocationObserverHook* alloc_hook, static void SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook); FreeObserverHook* free_hook);
@ -72,12 +65,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
const char* type_name); const char* type_name);
static bool ReallocOverrideHookIfEnabled(size_t* out, void* address); static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
PA_ALWAYS_INLINE static QuarantineOverrideHook* GetQuarantineOverrideHook() {
return quarantine_override_hook_.load(std::memory_order_acquire);
}
static void SetQuarantineOverrideHook(QuarantineOverrideHook* hook);
private: private:
// Single bool that is used to indicate whether observer or allocation hooks // Single bool that is used to indicate whether observer or allocation hooks
// are set to reduce the numbers of loads required to check whether hooking is // are set to reduce the numbers of loads required to check whether hooking is
@ -91,8 +78,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
static std::atomic<AllocationOverrideHook*> allocation_override_hook_; static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
static std::atomic<FreeOverrideHook*> free_override_hook_; static std::atomic<FreeOverrideHook*> free_override_hook_;
static std::atomic<ReallocOverrideHook*> realloc_override_hook_; static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
static std::atomic<QuarantineOverrideHook*> quarantine_override_hook_;
}; };
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -38,7 +38,7 @@
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#endif #endif
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -74,7 +74,7 @@ template <bool thread_safe>
PA_IMMEDIATE_CRASH(); // Not required, kept as documentation. PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
} }
#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means // |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
// that a partial super page is allowed at the end. Since the block list uses // that a partial super page is allowed at the end. Since the block list uses
// kSuperPageSize granularity, a partial super page is considered blocked if // kSuperPageSize granularity, a partial super page is considered blocked if
@ -93,7 +93,7 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
} }
return true; return true;
} }
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) && #endif // !PA_CONFIG(HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// Reserves |requested_size| worth of super pages from the specified pool. // Reserves |requested_size| worth of super pages from the specified pool.
@ -123,7 +123,7 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
// In 32-bit mode, when allocating from BRP pool, verify that the requested // In 32-bit mode, when allocating from BRP pool, verify that the requested
// allocation honors the block list. Find a better address otherwise. // allocation honors the block list. Find a better address otherwise.
#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) { if (pool == kBRPPoolHandle) {
constexpr int kMaxRandomAddressTries = 10; constexpr int kMaxRandomAddressTries = 10;
for (int i = 0; i < kMaxRandomAddressTries; ++i) { for (int i = 0; i < kMaxRandomAddressTries; ++i) {
@ -172,10 +172,10 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
reserved_address = 0; reserved_address = 0;
} }
} }
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) && #endif // !PA_CONFIG(HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
// Only mark the region as belonging to the pool after it has passed the // Only mark the region as belonging to the pool after it has passed the
// blocklist check in order to avoid a potential race with destructing a // blocklist check in order to avoid a potential race with destructing a
// raw_ptr<T> object that points to non-PA memory in another thread. // raw_ptr<T> object that points to non-PA memory in another thread.
@ -284,7 +284,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{ {
// Reserving memory from the pool is actually not a syscall on 64 bit // Reserving memory from the pool is actually not a syscall on 64 bit
// platforms. // platforms.
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
#endif #endif
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size); reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
@ -434,7 +434,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{ {
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start, AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size); reservation_size);
#endif #endif

View File

@ -9,8 +9,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -39,13 +39,13 @@ constexpr size_t OrderSubIndexMask(uint8_t order) {
(kNumBucketsPerOrderBits + 1); (kNumBucketsPerOrderBits + 1);
} }
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
#define PA_BITS_PER_SIZE_T 64 #define PA_BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, ""); static_assert(kBitsPerSizeT == 64, "");
#else #else
#define PA_BITS_PER_SIZE_T 32 #define PA_BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, ""); static_assert(kBitsPerSizeT == 32, "");
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = { inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2), OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2),
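
The constants in this hunk (kOrderIndexShift, OrderSubIndexMask, PA_BITS_PER_SIZE_T) drive PartitionAlloc's bucket lookup. A rough standalone model of the idea (C++20 for std::bit_width; kNumBucketsPerOrderBits == 3 is an assumption for illustration only): the "order" of a request is the position of its most significant bit, and the next few bits below it select a finer-grained bucket within that order.

#include <bit>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kNumBucketsPerOrderBits = 3;  // assumed value

struct OrderAndSubIndex {
  unsigned order;      // MSB position, 1-based (order 0 => size 0).
  unsigned sub_index;  // Next kNumBucketsPerOrderBits bits below the MSB.
};

constexpr OrderAndSubIndex Classify(size_t size) {
  unsigned order = std::bit_width(size);  // 0 for size == 0.
  unsigned sub_index = 0;
  if (order > kNumBucketsPerOrderBits) {
    sub_index = static_cast<unsigned>(
        (size >> (order - 1 - kNumBucketsPerOrderBits)) &
        ((size_t{1} << kNumBucketsPerOrderBits) - 1));
  }
  return {order, sub_index};
}

int main() {
  for (size_t s : {24u, 100u, 4096u}) {
    auto [order, sub] = Classify(s);
    std::cout << s << " -> order " << order << ", sub " << sub << "\n";
  }
}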


@ -249,7 +249,9 @@ class PartitionFreelistEntry {
(next_address & kSuperPageBaseMask); (next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
bool marked_as_free_in_bitmap = bool marked_as_free_in_bitmap =
for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address); for_thread_cache
? true
: !FreeSlotBitmapSlotIsUsed(reinterpret_cast<uintptr_t>(next));
#else #else
bool marked_as_free_in_bitmap = true; bool marked_as_free_in_bitmap = true;
#endif #endif
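
Both sides of this hunk ask the same question — is the slot that the freelist entry points at still marked as used in the free-slot bitmap — and only differ in whether the address comes from next_address or from the decoded next pointer cast back to uintptr_t. A minimal standalone model of such a bitmap (slot size and span capacity are made-up values):

#include <bitset>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kSlotSize = 64;     // assumed
constexpr size_t kNumSlots = 512;    // assumed
std::bitset<kNumSlots> g_used_bits;  // 1 = allocated, 0 = free

size_t SlotIndex(uintptr_t slot_start, uintptr_t span_base) {
  return (slot_start - span_base) / kSlotSize;
}

bool FreeSlotBitmapSlotIsUsed(uintptr_t slot_start, uintptr_t span_base) {
  return g_used_bits.test(SlotIndex(slot_start, span_base));
}

int main() {
  constexpr uintptr_t kSpanBase = 0x10000;
  g_used_bits.set(SlotIndex(kSpanBase + 3 * kSlotSize, kSpanBase));
  // A freelist entry pointing at slot 3 would fail the check; slot 4 passes.
  std::cout << FreeSlotBitmapSlotIsUsed(kSpanBase + 3 * kSlotSize, kSpanBase)
            << ' '
            << FreeSlotBitmapSlotIsUsed(kSpanBase + 4 * kSlotSize, kSpanBase)
            << "\n";  // prints "1 0"
}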


@ -17,6 +17,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
@ -322,26 +323,23 @@ void UnmapNow(uintptr_t reservation_start,
// In 32-bit mode, the beginning of a reservation may be excluded from the // In 32-bit mode, the beginning of a reservation may be excluded from the
// BRP pool, so shift the pointer. Other pools don't have this logic. // BRP pool, so shift the pointer. Other pools don't have this logic.
PA_DCHECK(IsManagedByPartitionAllocBRPPool( PA_DCHECK(IsManagedByPartitionAllocBRPPool(
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
reservation_start reservation_start
#else #else
reservation_start + reservation_start +
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
)); ));
} else } else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{ {
PA_DCHECK(pool == kRegularPoolHandle PA_DCHECK(
pool == kRegularPoolHandle
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
|| pool == kPkeyPoolHandle || pool == kPkeyPoolHandle
#endif #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) || (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle));
||
(IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)
#endif
);
// Non-BRP pools don't need adjustment that BRP needs in 32-bit mode. // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) || PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
@ -367,7 +365,7 @@ void UnmapNow(uintptr_t reservation_start,
*offset_ptr++ = kOffsetTagNotAllocated; *offset_ptr++ = kOffsetTagNotAllocated;
} }
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start, AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size); reservation_size);
#endif #endif


@ -21,6 +21,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h" #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_bucket.h"
@ -137,7 +138,7 @@ struct SlotSpanMetadata {
PartitionBucket<thread_safe>* const bucket = nullptr; PartitionBucket<thread_safe>* const bucket = nullptr;
// CHECK()ed in AllocNewSlotSpan(). // CHECK()ed in AllocNewSlotSpan().
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE) #if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)
// System page size is not a constant on Apple OSes, but is either 4 or 16kiB // System page size is not a constant on Apple OSes, but is either 4 or 16kiB
// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And // (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
// PartitionPageSize() is 4 times the OS page size. // PartitionPageSize() is 4 times the OS page size.
@ -154,7 +155,7 @@ struct SlotSpanMetadata {
// larger, so it doesn't have as many slots. // larger, so it doesn't have as many slots.
static constexpr size_t kMaxSlotsPerSlotSpan = static constexpr size_t kMaxSlotsPerSlotSpan =
PartitionPageSize() / kSmallestBucket; PartitionPageSize() / kSmallestBucket;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE) #endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)
// The maximum number of bits needed to cover all currently supported OSes. // The maximum number of bits needed to cover all currently supported OSes.
static constexpr size_t kMaxSlotsPerSlotSpanBits = 13; static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), ""); static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
@ -481,8 +482,7 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
return reinterpret_cast<AllocationStateMap*>( return reinterpret_cast<AllocationStateMap*>(
SuperPageStateBitmapAddr(super_page)); SuperPageStateBitmapAddr(super_page));
} }
#else
#else // BUILDFLAG(USE_STARSCAN)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() { ReservedStateBitmapSize() {


@ -736,24 +736,13 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
// this function on PartitionRoots without a thread cache. // this function on PartitionRoots without a thread cache.
PA_CHECK(!flags.with_thread_cache); PA_CHECK(!flags.with_thread_cache);
auto pool_handle = ChoosePool(); auto pool_handle = ChoosePool();
#if BUILDFLAG(ENABLE_PKEYS)
// The pages managed by pkey will be free-ed at UninitPKeyForTesting().
// Don't invoke FreePages() for the pages.
if (pool_handle == internal::kPkeyPoolHandle) {
return;
}
PA_DCHECK(pool_handle < internal::kNumPools);
#else
PA_DCHECK(pool_handle <= internal::kNumPools);
#endif
auto* curr = first_extent; auto* curr = first_extent;
while (curr != nullptr) { while (curr != nullptr) {
auto* next = curr->next; auto* next = curr->next;
uintptr_t address = SuperPagesBeginFromExtent(curr); uintptr_t address = SuperPagesBeginFromExtent(curr);
size_t size = size_t size =
internal::kSuperPageSize * curr->number_of_consecutive_super_pages; internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address, internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
size); size);
#endif #endif
@ -770,7 +759,7 @@ void PartitionRoot<thread_safe>::EnableMac11MallocSizeHackForTesting() {
} }
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK) #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS)
namespace { namespace {
std::atomic<bool> g_reserve_brp_guard_region_called; std::atomic<bool> g_reserve_brp_guard_region_called;
// An address constructed by repeating `kQuarantinedByte` shouldn't never point // An address constructed by repeating `kQuarantinedByte` shouldn't never point
@ -806,7 +795,7 @@ void ReserveBackupRefPtrGuardRegionIfNeeded() {
} }
} // namespace } // namespace
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
// !BUILDFLAG(HAS_64_BIT_POINTERS) // !PA_CONFIG(HAS_64_BITS_POINTERS)
template <bool thread_safe> template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) { void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
@ -835,12 +824,12 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// running on the right hardware. // running on the right hardware.
::partition_alloc::internal::InitializeMTESupportIfNeeded(); ::partition_alloc::internal::InitializeMTESupportIfNeeded();
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// Reserve address space for partition alloc. // Reserve address space for partition alloc.
internal::PartitionAddressSpace::Init(); internal::PartitionAddressSpace::Init();
#endif #endif
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS)
ReserveBackupRefPtrGuardRegionIfNeeded(); ReserveBackupRefPtrGuardRegionIfNeeded();
#endif #endif
@ -1510,73 +1499,6 @@ void PartitionRoot<thread_safe>::DeleteForTesting(
delete partition_root; delete partition_root;
} }
template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetForTesting(bool allow_leaks) {
if (flags.with_thread_cache) {
ThreadCache::SwapForTesting(nullptr);
flags.with_thread_cache = false;
}
::partition_alloc::internal::ScopedGuard guard(lock_);
#if BUILDFLAG(PA_DCHECK_IS_ON)
if (!allow_leaks) {
unsigned num_allocated_slots = 0;
for (Bucket& bucket : buckets) {
if (bucket.active_slot_spans_head !=
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
bucket.active_slot_spans_head;
slot_span; slot_span = slot_span->next_slot_span) {
num_allocated_slots += slot_span->num_allocated_slots;
}
}
// Full slot spans are nowhere. Need to see bucket.num_full_slot_spans
// to count the number of full slot spans' slots.
if (bucket.num_full_slot_spans) {
num_allocated_slots +=
bucket.num_full_slot_spans * bucket.get_slots_per_span();
}
}
PA_DCHECK(num_allocated_slots == 0);
// Check for direct-mapped allocations.
PA_DCHECK(!direct_map_list);
}
#endif
DestructForTesting(); // IN-TEST
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
if (initialized) {
internal::PartitionRootEnumerator::Instance().Unregister(this);
}
#endif // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
for (Bucket& bucket : buckets) {
bucket.active_slot_spans_head =
SlotSpan::get_sentinel_slot_span_non_const();
bucket.empty_slot_spans_head = nullptr;
bucket.decommitted_slot_spans_head = nullptr;
bucket.num_full_slot_spans = 0;
}
next_super_page = 0;
next_partition_page = 0;
next_partition_page_end = 0;
current_extent = nullptr;
first_extent = nullptr;
direct_map_list = nullptr;
for (auto& entity : global_empty_slot_span_ring) {
entity = nullptr;
}
global_empty_slot_span_ring_index = 0;
global_empty_slot_span_ring_size = internal::kDefaultEmptySlotSpanRingSize;
initialized = false;
}
template <bool thread_safe> template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() { void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() {
::partition_alloc::internal::ScopedGuard guard{lock_}; ::partition_alloc::internal::ScopedGuard guard{lock_};


@ -74,6 +74,7 @@
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#endif #endif
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
@ -405,8 +406,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionRoot() PartitionRoot()
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {} : flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); } explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
// TODO(tasak): remove ~PartitionRoot() after confirming all tests
// don't need ~PartitionRoot().
~PartitionRoot(); ~PartitionRoot();
// This will unreserve any space in the pool that the PartitionRoot is // This will unreserve any space in the pool that the PartitionRoot is
@ -586,7 +585,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionStatsDumper* partition_stats_dumper); PartitionStatsDumper* partition_stats_dumper);
static void DeleteForTesting(PartitionRoot* partition_root); static void DeleteForTesting(PartitionRoot* partition_root);
void ResetForTesting(bool allow_leaks);
void ResetBookkeepingForTesting(); void ResetBookkeepingForTesting();
PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const { PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
@ -655,12 +653,10 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
} }
internal::pool_handle ChoosePool() const { internal::pool_handle ChoosePool() const {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
if (flags.use_configurable_pool) { if (flags.use_configurable_pool) {
PA_DCHECK(IsConfigurablePoolAvailable()); PA_DCHECK(IsConfigurablePoolAvailable());
return internal::kConfigurablePoolHandle; return internal::kConfigurablePoolHandle;
} }
#endif
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
if (flags.pkey != internal::kDefaultPkey) { if (flags.pkey != internal::kDefaultPkey) {
return internal::kPkeyPoolHandle; return internal::kPkeyPoolHandle;
@ -966,13 +962,13 @@ class ScopedSyscallTimer {
PA_ALWAYS_INLINE uintptr_t PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) { PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
PA_DCHECK(IsManagedByPartitionAllocBRPPool(address)); PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// Use this variant of GetDirectMapReservationStart as it has better // Use this variant of GetDirectMapReservationStart as it has better
// performance. // performance.
uintptr_t offset = OffsetInBRPPool(address); uintptr_t offset = OffsetInBRPPool(address);
uintptr_t reservation_start = uintptr_t reservation_start =
GetDirectMapReservationStart(address, kBRPPoolHandle, offset); GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else
uintptr_t reservation_start = GetDirectMapReservationStart(address); uintptr_t reservation_start = GetDirectMapReservationStart(address);
#endif #endif
if (!reservation_start) { if (!reservation_start) {
@ -1400,13 +1396,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// potential use-after-free issues into unexploitable crashes. // potential use-after-free issues into unexploitable crashes.
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() && if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
brp_zapping_enabled())) { brp_zapping_enabled())) {
auto usable_size = slot_span->GetUsableSize(this); internal::SecureMemset(object, internal::kQuarantinedByte,
auto hook = PartitionAllocHooks::GetQuarantineOverrideHook(); slot_span->GetUsableSize(this));
if (PA_UNLIKELY(hook)) {
hook(object, usable_size);
} else {
internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
}
} }
if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) { if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
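
The hunk above is where BRP "zapping" happens on free: on the older (111) side the object is simply overwritten with kQuarantinedByte, while the newer (112) side first gives a quarantine override hook a chance to handle it. A standalone sketch of the basic zap — the 0xEF marker value is an assumption here, and plain memset stands in for PartitionAlloc's SecureMemset (which is written so the compiler cannot elide it):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

constexpr uint8_t kQuarantinedByte = 0xEF;  // assumed marker value

// Overwrite a freed-but-possibly-still-referenced object with a recognizable
// pattern, so a later use-after-free reads poisoned data instead of a stale
// object.
void ZapOnFree(void* object, size_t usable_size) {
  std::memset(object, kQuarantinedByte, usable_size);
}

int main() {
  alignas(8) unsigned char slot[32] = {1, 2, 3, 4};
  ZapOnFree(slot, sizeof(slot));
  std::cout << std::hex << static_cast<int>(slot[0]) << "\n";  // prints ef
}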


@ -11,12 +11,12 @@
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
#error "pkey support requires 64 bit pointers" #error "pkey support requires 64 bit pointers"
#endif #endif


@ -85,90 +85,130 @@ namespace base {
// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a // NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
// lightweight replacement of a raw pointer, hence performance is critical. // lightweight replacement of a raw pointer, hence performance is critical.
// This is a bitfield representing the different flags that can be applied to a
// raw_ptr.
//
// Internal use only: Developers shouldn't use those values directly.
//
// Housekeeping rules: Try not to change trait values, so that numeric trait
// values stay constant across builds (could be useful e.g. when analyzing stack
// traces). A reasonable exception to this rule are `*ForTest` traits. As a
// matter of fact, we propose that new non-test traits are added before the
// `*ForTest` traits.
enum class RawPtrTraits : unsigned {
kEmpty = 0,
// Disables dangling pointer detection, but keeps other raw_ptr protections.
//
// Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
// instead.
kMayDangle = (1 << 0),
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Disables any protections when MTECheckedPtrImpl is requested, by
// switching to NoOpImpl in that case.
//
// Don't use directly, use DegradeToNoOpWhenMTE instead.
kDisableMTECheckedPtr = (1 << 1),
#else
kDisableMTECheckedPtr = kEmpty,
#endif
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
// Disables any hooks, by switching to NoOpImpl in that case.
//
// Internal use only.
kDisableHooks = (1 << 2),
#else
kDisableHooks = kEmpty,
#endif
// Pointer arithmetic is discouraged and disabled by default.
//
// Don't use directly, use AllowPtrArithmetic instead.
kAllowPtrArithmetic = (1 << 3),
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track
// the number of times the raw_ptr is wrapped, unwrapped, etc.
//
// Test only.
kUseCountingWrapperForTest = (1 << 4),
};
// Used to combine RawPtrTraits:
constexpr RawPtrTraits operator|(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) |
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator&(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) &
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator~(RawPtrTraits a) {
return static_cast<RawPtrTraits>(~static_cast<unsigned>(a));
}
namespace raw_ptr_traits { namespace raw_ptr_traits {
constexpr bool Contains(RawPtrTraits a, RawPtrTraits b) { // Disables dangling pointer detection, but keeps other raw_ptr protections.
return (a & b) != RawPtrTraits::kEmpty; // Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
} // instead.
struct MayDangle {};
// Disables any protections when MTECheckedPtrImpl is requested, by switching to
// NoOpImpl in that case.
// Don't use directly, use DegradeToNoOpWhenMTE instead.
struct DisableMTECheckedPtr {};
// Disables any hooks, by switching to NoOpImpl in that case.
// Internal use only.
struct DisableHooks {};
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track the
// number of times the raw_ptr is wrapped, unrwapped, etc.
// Test only.
struct UseCountingWrapperForTest {};
// Very internal use only.
using EmptyTrait = void;
constexpr RawPtrTraits Remove(RawPtrTraits a, RawPtrTraits b) { template <typename Trait>
return a & ~b; inline constexpr bool IsValidTraitV =
} std::is_same_v<Trait, MayDangle> ||
std::is_same_v<Trait, DisableMTECheckedPtr> ||
std::is_same_v<Trait, DisableHooks> ||
std::is_same_v<Trait, UseCountingWrapperForTest> ||
std::is_same_v<Trait, EmptyTrait>;
constexpr bool AreValid(RawPtrTraits traits) { template <typename... Traits>
return Remove(traits, RawPtrTraits::kMayDangle | struct TraitPack {
RawPtrTraits::kDisableMTECheckedPtr | static_assert((IsValidTraitV<Traits> && ...), "Unknown raw_ptr trait");
RawPtrTraits::kDisableHooks |
RawPtrTraits::kAllowPtrArithmetic |
RawPtrTraits::kUseCountingWrapperForTest) ==
RawPtrTraits::kEmpty;
}
template <RawPtrTraits Traits> template <typename TraitToSearch>
static inline constexpr bool HasV =
(std::is_same_v<TraitToSearch, Traits> || ...);
};
// Replaces an unwanted trait with EmptyTrait.
template <typename TraitToExclude>
struct ExcludeTrait {
template <typename Trait>
using Filter = std::
conditional_t<std::is_same_v<TraitToExclude, Trait>, EmptyTrait, Trait>;
};
// Use TraitBundle alias, instead of TraitBundleInt, so that traits in different
// order and duplicates resolve to the same underlying type. For example,
// TraitBundle<A,B> is the same C++ type as TraitBundle<B,A,B,A>. This also
// allows to entirely ignore a trait under some build configurations, to prevent
// it from turning TraitBundle into a different C++ type.
//
// It'd be easier to just pass bools into TraitBundleInt, instead of echo'ing
// the trait, but that would lead to less readable compiler messages that spit
// out the type. TraitBundleInt<MayDangle,EmptyTrait,DisableHooks,EmptyTrait> is
// more readable than TraitBundleInt<true,false,true,false>.
template <typename... Traits>
struct TraitBundleInt;
template <typename... Traits>
using TraitBundle = TraitBundleInt<
std::conditional_t<TraitPack<Traits...>::template HasV<MayDangle>,
MayDangle,
EmptyTrait>,
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
std::conditional_t<
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>,
DisableMTECheckedPtr,
EmptyTrait>,
#else
// Entirely ignore DisableMTECheckedPtr on non-MTECheckedPtr builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<TraitPack<Traits...>::template HasV<DisableHooks>,
DisableHooks,
EmptyTrait>,
#else
// Entirely ignore DisableHooks on non-ASanBRP builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>,
UseCountingWrapperForTest,
EmptyTrait>>;
template <typename... Traits>
struct TraitBundleInt {
static constexpr bool kMayDangle =
TraitPack<Traits...>::template HasV<MayDangle>;
static constexpr bool kDisableMTECheckedPtr =
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>;
static constexpr bool kDisableHooks =
TraitPack<Traits...>::template HasV<DisableHooks>;
static constexpr bool kUseCountingWrapperForTest =
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>;
// Assert that on certain build configurations, the related traits are not
// even used. If they were, they'd result in a different C++ type, and would
// trigger more costly cross-type raw_ptr/raw_ref conversions.
#if !PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
static_assert(!kDisableMTECheckedPtr);
#endif
#if !BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
static_assert(!kDisableHooks);
#endif
// Use TraitBundle, instead of TraitBundleInt, to re-normalize trait list
// (i.e. order canonically and remove duplicates).
template <typename TraitToAdd>
using AddTraitT = TraitBundle<Traits..., TraitToAdd>;
// Unlike AddTraitT, no need to re-normalize because ExcludeTrait preserves
// the trait list structure.
template <typename TraitToRemove>
using RemoveTraitT = TraitBundleInt<
typename ExcludeTrait<TraitToRemove>::template Filter<Traits>...>;
};
template <typename TraitBundle>
struct TraitsToImpl; struct TraitsToImpl;
} // namespace raw_ptr_traits } // namespace raw_ptr_traits
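
The two sides of this hunk encode the same per-pointer options in different ways: the newer side packs them into a RawPtrTraits bit mask combined with operator|, the older side spells them as a TraitBundle type list. A reduced, self-contained demo of the bit-mask flavor (DemoTraits and DemoPtr are invented names; only the pattern mirrors the header):

#include <iostream>

enum class DemoTraits : unsigned {
  kEmpty = 0,
  kMayDangle = (1 << 0),
  kDisableHooks = (1 << 2),
};

constexpr DemoTraits operator|(DemoTraits a, DemoTraits b) {
  return static_cast<DemoTraits>(static_cast<unsigned>(a) |
                                 static_cast<unsigned>(b));
}
constexpr DemoTraits operator&(DemoTraits a, DemoTraits b) {
  return static_cast<DemoTraits>(static_cast<unsigned>(a) &
                                 static_cast<unsigned>(b));
}
constexpr bool Contains(DemoTraits a, DemoTraits b) {
  return (a & b) != DemoTraits::kEmpty;
}

// The traits travel as a non-type template parameter, the way
// raw_ptr<T, Traits> does on the newer side of the diff.
template <typename T, DemoTraits Traits = DemoTraits::kEmpty>
struct DemoPtr {
  static constexpr bool kMayDangle = Contains(Traits, DemoTraits::kMayDangle);
  T* ptr = nullptr;
};

int main() {
  DemoPtr<int, DemoTraits::kMayDangle | DemoTraits::kDisableHooks> p;
  std::cout << p.kMayDangle << "\n";  // prints 1
}

Either spelling ultimately selects the backing implementation through TraitsToImpl; a field that may legitimately dangle is then declared as raw_ptr<T, DanglingUntriaged> on the newer side or with the corresponding TraitBundle alias on the older side.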
@ -306,33 +346,17 @@ struct MTECheckedPtrImpl {
// Wraps a pointer, and returns its uintptr_t representation. // Wraps a pointer, and returns its uintptr_t representation.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
// Catch the obviously unsupported cases, e.g. `nullptr` or `-1ull`.
//
// `ExtractPtr(ptr)` should be functionally identical to `ptr` for
// the purposes of `EnabledForPtr()`, since we assert that `ptr` is
// an untagged raw pointer (there are no tag bits provided by
// MTECheckedPtr to strip off). However, something like `-1ull`
// looks identical to a fully tagged-up pointer. We'll add a check
// here just to make sure there's no difference in the support check
// whether extracted or not.
const bool extracted_supported =
PartitionAllocSupport::EnabledForPtr(ExtractPtr(ptr));
const bool raw_supported = PartitionAllocSupport::EnabledForPtr(ptr);
PA_BASE_DCHECK(extracted_supported == raw_supported);
// At the expense of consistency, we use the `raw_supported`
// condition. When wrapping a raw pointer, we assert that having set
// bits conflatable with the MTECheckedPtr tag disqualifies `ptr`
// from support.
if (!raw_supported) {
return ptr;
}
// Disambiguation: UntagPtr removes the hardware MTE tag, whereas this // Disambiguation: UntagPtr removes the hardware MTE tag, whereas this
// function is responsible for adding the software MTE tag. // function is responsible for adding the software MTE tag.
uintptr_t addr = partition_alloc::UntagPtr(ptr); uintptr_t addr = partition_alloc::UntagPtr(ptr);
PA_BASE_DCHECK(ExtractTag(addr) == 0ull); PA_BASE_DCHECK(ExtractTag(addr) == 0ull);
// Return a not-wrapped |addr|, if it's either nullptr or if the protection
// for this pointer is disabled.
if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
return ptr;
}
// Read the tag and place it in the top bits of the address. // Read the tag and place it in the top bits of the address.
// Even if PartitionAlloc's tag has less than kTagBits, we'll read // Even if PartitionAlloc's tag has less than kTagBits, we'll read
// what's given and pad the rest with 0s. // what's given and pad the rest with 0s.
@ -383,30 +407,19 @@ struct MTECheckedPtrImpl {
return wrapped_ptr; return wrapped_ptr;
} }
// Unwraps the pointer as a T*, without making an assertion on whether // Unwraps the pointer's uintptr_t representation, while asserting that memory
// memory was freed or not. // hasn't been freed. The function must handle nullptr gracefully.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
// Return `wrapped_ptr` straightaway if protection is disabled, e.g. // SafelyUnwrapPtrForDereference handles nullptr case well.
// when `ptr` is `nullptr` or `uintptr_t{-1ull}`. return SafelyUnwrapPtrForDereference(wrapped_ptr);
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
} }
// Unwraps the pointer's uintptr_t representation, without making an assertion // Unwraps the pointer's uintptr_t representation, without making an assertion
// on whether memory was freed or not. // on whether memory was freed or not.
template <typename T> template <typename T>
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
// Return `wrapped_ptr` straightaway if protection is disabled, e.g. return ExtractPtr(wrapped_ptr);
// when `ptr` is `nullptr` or `uintptr_t{-1ull}`.
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
} }
// Upcasts the wrapped pointer. // Upcasts the wrapped pointer.
@ -509,12 +522,10 @@ struct MTECheckedPtrImpl {
// wrapped, unrwapped, etc. // wrapped, unrwapped, etc.
// //
// Test only. // Test only.
template <RawPtrTraits Traits> template <typename Traits>
struct RawPtrCountingImplWrapperForTest struct RawPtrCountingImplWrapperForTest
: public raw_ptr_traits::TraitsToImpl<Traits>::Impl { : public raw_ptr_traits::TraitsToImpl<Traits>::Impl {
static_assert( static_assert(!Traits::kUseCountingWrapperForTest);
!raw_ptr_traits::Contains(Traits,
RawPtrTraits::kUseCountingWrapperForTest));
using SuperImpl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl; using SuperImpl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -674,36 +685,31 @@ struct IsSupportedType<T,
#undef PA_WINDOWS_HANDLE_TYPE #undef PA_WINDOWS_HANDLE_TYPE
#endif #endif
template <RawPtrTraits Traits> template <typename Traits>
struct TraitsToImpl { struct TraitsToImpl {
static_assert(AreValid(Traits), "Unknown raw_ptr trait(s)");
private: private:
// UnderlyingImpl is the struct that provides the implementation of the // UnderlyingImpl is the struct that provides the implementation of the
// protections related to raw_ptr. // protections related to raw_ptr.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
using UnderlyingImpl = internal::RawPtrBackupRefImpl< using UnderlyingImpl = internal::RawPtrBackupRefImpl<
/*allow_dangling=*/Contains(Traits, RawPtrTraits::kMayDangle)>; /*AllowDangling=*/Traits::kMayDangle>;
#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR) #elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
using UnderlyingImpl = std::conditional_t< using UnderlyingImpl =
Contains(Traits, RawPtrTraits::kMayDangle), std::conditional_t<Traits::kMayDangle,
// No special bookkeeping required for this case, // No special bookkeeping required for this case,
// just treat these as ordinary pointers. // just treat these as ordinary pointers.
internal::RawPtrNoOpImpl, internal::RawPtrNoOpImpl,
internal::RawPtrAsanUnownedImpl< internal::RawPtrAsanUnownedImpl>;
Contains(Traits, RawPtrTraits::kAllowPtrArithmetic)>>;
#elif PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #elif PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
using UnderlyingImpl = using UnderlyingImpl =
std::conditional_t<Contains(Traits, RawPtrTraits::kDisableMTECheckedPtr), std::conditional_t<Traits::kDisableMTECheckedPtr,
internal::RawPtrNoOpImpl, internal::RawPtrNoOpImpl,
internal::MTECheckedPtrImpl< internal::MTECheckedPtrImpl<
internal::MTECheckedPtrImplPartitionAllocSupport>>; internal::MTECheckedPtrImplPartitionAllocSupport>>;
#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR) #elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
using UnderlyingImpl = using UnderlyingImpl = std::conditional_t<Traits::kDisableHooks,
std::conditional_t<Contains(Traits, RawPtrTraits::kDisableHooks), internal::RawPtrNoOpImpl,
internal::RawPtrNoOpImpl, internal::RawPtrHookableImpl>;
internal::RawPtrHookableImpl>;
#else #else
using UnderlyingImpl = internal::RawPtrNoOpImpl; using UnderlyingImpl = internal::RawPtrNoOpImpl;
#endif #endif
@ -714,9 +720,9 @@ struct TraitsToImpl {
// Impl may be different from UnderlyingImpl, because it may include a // Impl may be different from UnderlyingImpl, because it may include a
// wrapper. // wrapper.
using Impl = std::conditional_t< using Impl = std::conditional_t<
Contains(Traits, RawPtrTraits::kUseCountingWrapperForTest), Traits::kUseCountingWrapperForTest,
internal::RawPtrCountingImplWrapperForTest< internal::RawPtrCountingImplWrapperForTest<
Remove(Traits, RawPtrTraits::kUseCountingWrapperForTest)>, typename Traits::template RemoveTraitT<UseCountingWrapperForTest>>,
UnderlyingImpl>; UnderlyingImpl>;
}; };
@ -748,11 +754,13 @@ struct TraitsToImpl {
// non-default move constructor/assignment. Thus, it's possible to get an error // non-default move constructor/assignment. Thus, it's possible to get an error
// where the pointer is not actually dangling, and have to work around the // where the pointer is not actually dangling, and have to work around the
// compiler. We have not managed to construct such an example in Chromium yet. // compiler. We have not managed to construct such an example in Chromium yet.
template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty> template <typename T, typename Traits = raw_ptr_traits::TraitBundle<>>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr { class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// Type to return from ExtractAsDangling(), which is identical except // Type to return from ExtractAsDangling(), which is identical except
// kMayDangle trait is added (if one isn't there already). // MayDangle trait is added (if one isn't there already).
using DanglingRawPtrType = raw_ptr<T, Traits | RawPtrTraits::kMayDangle>; using DanglingRawPtrType =
raw_ptr<T,
typename Traits::template AddTraitT<raw_ptr_traits::MayDangle>>;
public: public:
using Impl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl; using Impl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -834,15 +842,17 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
template <RawPtrTraits PassedTraits, template <
typename Unused = std::enable_if_t<Traits != PassedTraits>> typename PassedTraits,
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
PA_ALWAYS_INLINE explicit raw_ptr(const raw_ptr<T, PassedTraits>& p) noexcept PA_ALWAYS_INLINE explicit raw_ptr(const raw_ptr<T, PassedTraits>& p) noexcept
: wrapped_ptr_(Impl::WrapRawPtrForDuplication( : wrapped_ptr_(Impl::WrapRawPtrForDuplication(
raw_ptr_traits::TraitsToImpl<PassedTraits>::Impl:: raw_ptr_traits::TraitsToImpl<PassedTraits>::Impl::
UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {} UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {}
template <RawPtrTraits PassedTraits, template <
typename Unused = std::enable_if_t<Traits != PassedTraits>> typename PassedTraits,
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
PA_ALWAYS_INLINE raw_ptr& operator=( PA_ALWAYS_INLINE raw_ptr& operator=(
const raw_ptr<T, PassedTraits>& p) noexcept { const raw_ptr<T, PassedTraits>& p) noexcept {
Impl::ReleaseWrappedPtr(wrapped_ptr_); Impl::ReleaseWrappedPtr(wrapped_ptr_);
@ -994,23 +1004,20 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
return *this += -delta_elems; return *this += -delta_elems;
} }
// Do not disable operator+() and operator-(). template <
// They provide OOB checks. Keep them enabled, which may be blocked later when typename Z,
// attempting to apply the += or -= operation, when disabled. In the absence typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>>
// of operators +/-, the compiler is free to implicitly convert to the
// underlying T* representation and perform ordinary pointer arithmetic, thus
// invalidating the purpose behind disabling them.
template <typename Z>
friend PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p, Z delta_elems) { friend PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p; raw_ptr result = p;
return result += delta_elems; return result += delta_elems;
} }
template <typename Z> template <
typename Z,
typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>>
friend PA_ALWAYS_INLINE raw_ptr operator-(const raw_ptr& p, Z delta_elems) { friend PA_ALWAYS_INLINE raw_ptr operator-(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p; raw_ptr result = p;
return result -= delta_elems; return result -= delta_elems;
} }
friend PA_ALWAYS_INLINE ptrdiff_t operator-(const raw_ptr& p1, friend PA_ALWAYS_INLINE ptrdiff_t operator-(const raw_ptr& p1,
const raw_ptr& p2) { const raw_ptr& p2) {
return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_); return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_);
@ -1079,22 +1086,22 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work, // `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work,
// because a comparison operator defined inline would not be allowed to call // because a comparison operator defined inline would not be allowed to call
// `raw_ptr<U>`'s private `GetForComparison()` method. // `raw_ptr<U>`'s private `GetForComparison()` method.
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2> template <typename U, typename V, typename R1, typename R2>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, R1>& lhs, friend PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs); const raw_ptr<V, R2>& rhs);
@ -1204,41 +1211,41 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// #union, #global-scope, #constexpr-ctor-field-initializer // #union, #global-scope, #constexpr-ctor-field-initializer
RAW_PTR_EXCLUSION T* wrapped_ptr_; RAW_PTR_EXCLUSION T* wrapped_ptr_;
template <typename U, base::RawPtrTraits R> template <typename U, typename R>
friend class raw_ptr; friend class raw_ptr;
}; };
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() == rhs.GetForComparison(); return lhs.GetForComparison() == rhs.GetForComparison();
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return !(lhs == rhs); return !(lhs == rhs);
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() < rhs.GetForComparison(); return lhs.GetForComparison() < rhs.GetForComparison();
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() > rhs.GetForComparison(); return lhs.GetForComparison() > rhs.GetForComparison();
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() <= rhs.GetForComparison(); return lhs.GetForComparison() <= rhs.GetForComparison();
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, typename Traits1, typename Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs, PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) { const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() >= rhs.GetForComparison(); return lhs.GetForComparison() >= rhs.GetForComparison();
@ -1247,7 +1254,7 @@ PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
template <typename T> template <typename T>
struct IsRawPtr : std::false_type {}; struct IsRawPtr : std::false_type {};
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {}; struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -1256,9 +1263,9 @@ inline constexpr bool IsRawPtrV = IsRawPtr<T>::value;
template <typename T> template <typename T>
inline constexpr bool IsRawPtrMayDangleV = false; inline constexpr bool IsRawPtrMayDangleV = false;
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> = inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
raw_ptr_traits::Contains(Traits, RawPtrTraits::kMayDangle); Traits::kMayDangle;
// Template helpers for working with T* or raw_ptr<T>. // Template helpers for working with T* or raw_ptr<T>.
template <typename T> template <typename T>
@ -1267,7 +1274,7 @@ struct IsPointer : std::false_type {};
template <typename T> template <typename T>
struct IsPointer<T*> : std::true_type {}; struct IsPointer<T*> : std::true_type {};
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
struct IsPointer<raw_ptr<T, Traits>> : std::true_type {}; struct IsPointer<raw_ptr<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -1283,7 +1290,7 @@ struct RemovePointer<T*> {
using type = T; using type = T;
}; };
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
struct RemovePointer<raw_ptr<T, Traits>> { struct RemovePointer<raw_ptr<T, Traits>> {
using type = T; using type = T;
}; };
@ -1304,19 +1311,23 @@ using base::raw_ptr;
// //
// When using it, please provide a justification about what guarantees that it // When using it, please provide a justification about what guarantees that it
// will never be dereferenced after becoming dangling. // will never be dereferenced after becoming dangling.
constexpr auto DisableDanglingPtrDetection = base::RawPtrTraits::kMayDangle; using DisableDanglingPtrDetection =
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
// See `docs/dangling_ptr.md` // See `docs/dangling_ptr.md`
// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the // Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
// occurrences are meant to be removed. See https://crbug.com/1291138. // occurrences are meant to be removed. See https://crbug.com/1291138.
constexpr auto DanglingUntriaged = base::RawPtrTraits::kMayDangle; using DanglingUntriaged =
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
// This type is to be used in callbacks arguments when it is known that they // This type is to be used in callbacks arguments when it is known that they
// might receive dangling pointers. In any other cases, please use one of: // might receive dangling pointers. In any other cases, please use one of:
// - raw_ptr<T, DanglingUntriaged> // - raw_ptr<T, DanglingUntriaged>
// - raw_ptr<T, DisableDanglingPtrDetection> // - raw_ptr<T, DisableDanglingPtrDetection>
template <typename T, base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty> template <typename T>
using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>; using MayBeDangling = base::raw_ptr<
T,
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>>;
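
DisableDanglingPtrDetection, DanglingUntriaged and MayBeDangling above are all instantiations of the same TraitBundle<MayDangle> on this side of the diff, which is why the bundle normalizes its trait list: the same traits in any order, even with duplicates, must name one C++ type. A tiny standalone model of that normalization (Bundle and MakeBundle are invented names mirroring TraitBundleInt and TraitBundle):

#include <type_traits>

struct MayDangle {};
struct DisableHooks {};
struct EmptyTrait {};  // stands in for the `void` used by the real code

template <typename... Traits>
struct Bundle {
  // Pack query: is a given trait present in the list?
  template <typename T>
  static constexpr bool HasV = (std::is_same_v<T, Traits> || ...);

  static constexpr bool kMayDangle = HasV<MayDangle>;
  static constexpr bool kDisableHooks = HasV<DisableHooks>;
};

// Normalized alias: fixed slot order, duplicates collapse.
template <typename... Traits>
using MakeBundle =
    Bundle<std::conditional_t<(std::is_same_v<MayDangle, Traits> || ...),
                              MayDangle, EmptyTrait>,
           std::conditional_t<(std::is_same_v<DisableHooks, Traits> || ...),
                              DisableHooks, EmptyTrait>>;

static_assert(std::is_same_v<MakeBundle<MayDangle, DisableHooks>,
                             MakeBundle<DisableHooks, MayDangle, MayDangle>>);
static_assert(MakeBundle<MayDangle>::kMayDangle);
static_assert(!MakeBundle<MayDangle>::kDisableHooks);

int main() {}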
// The following template parameters are only meaningful when `raw_ptr` // The following template parameters are only meaningful when `raw_ptr`
// is `MTECheckedPtr` (never the case unless a particular GN arg is set // is `MTECheckedPtr` (never the case unless a particular GN arg is set
@ -1331,18 +1342,25 @@ using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>;
// See `base/memory/raw_ptr_mtecheckedptr.md` // See `base/memory/raw_ptr_mtecheckedptr.md`
// Direct pass-through to no-op implementation. // Direct pass-through to no-op implementation.
constexpr auto DegradeToNoOpWhenMTE = base::RawPtrTraits::kDisableMTECheckedPtr; using DegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::DisableMTECheckedPtr>;
// The use of pointer arithmetic with raw_ptr is strongly discouraged and // As above, but with the "untriaged dangling" annotation.
// disabled by default. Usually a container like span<> should be used using DanglingUntriagedDegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle<
// instead of the raw_ptr. base::raw_ptr_traits::MayDangle,
constexpr auto AllowPtrArithmetic = base::RawPtrTraits::kAllowPtrArithmetic; base::raw_ptr_traits::DisableMTECheckedPtr>;
// As above, but with the "explicitly disable protection" annotation.
using DisableDanglingPtrDetectionDegradeToNoOpWhenMTE =
base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::MayDangle,
base::raw_ptr_traits::DisableMTECheckedPtr>;
namespace std { namespace std {
// Override so set/map lookups do not create extra raw_ptr. This also allows // Override so set/map lookups do not create extra raw_ptr. This also allows
// dangling pointers to be used for lookup. // dangling pointers to be used for lookup.
template <typename T, base::RawPtrTraits Traits> template <typename T, typename Traits>
struct less<raw_ptr<T, Traits>> { struct less<raw_ptr<T, Traits>> {
using Impl = typename raw_ptr<T, Traits>::Impl; using Impl = typename raw_ptr<T, Traits>::Impl;
using is_transparent = void; using is_transparent = void;
@ -1367,7 +1385,7 @@ struct less<raw_ptr<T, Traits>> {
// Define for cases where raw_ptr<T> holds a pointer to an array of type T. // Define for cases where raw_ptr<T> holds a pointer to an array of type T.
// This is consistent with definition of std::iterator_traits<T*>. // This is consistent with definition of std::iterator_traits<T*>.
// Algorithms like std::binary_search need that. // Algorithms like std::binary_search need that.
template <typename T, base::RawPtrTraits Traits> template <typename T, typename Traits>
struct iterator_traits<raw_ptr<T, Traits>> { struct iterator_traits<raw_ptr<T, Traits>> {
using difference_type = ptrdiff_t; using difference_type = ptrdiff_t;
using value_type = std::remove_cv_t<T>; using value_type = std::remove_cv_t<T>;
@ -1376,33 +1394,6 @@ struct iterator_traits<raw_ptr<T, Traits>> {
using iterator_category = std::random_access_iterator_tag; using iterator_category = std::random_access_iterator_tag;
}; };
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. The latter is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing the pointer_traits is the standard blessed way to customize
// `std::to_address(pointer)` in C++20 [3].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ptr<T, Traits>> {
using pointer = ::raw_ptr<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ptr<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(&r);
}
static constexpr element_type* to_address(pointer p) noexcept {
return p.get();
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std } // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_
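
The std::pointer_traits block deleted above (it only exists on the 112 side) is what lets std::to_address() see through a raw_ptr under libc++. A self-contained illustration of the same mechanism, with WrappedPtr standing in for raw_ptr:

#include <cstddef>
#include <iostream>
#include <memory>

template <typename T>
class WrappedPtr {
 public:
  constexpr explicit WrappedPtr(T* p) : ptr_(p) {}
  constexpr T* get() const { return ptr_; }
  constexpr T& operator*() const { return *ptr_; }

 private:
  T* ptr_;
};

// Specializing pointer_traits for a program-defined pointer-like type is the
// standard-blessed way to customize std::to_address().
namespace std {
template <typename T>
struct pointer_traits<WrappedPtr<T>> {
  using pointer = WrappedPtr<T>;
  using element_type = T;
  using difference_type = std::ptrdiff_t;

  template <typename U>
  using rebind = WrappedPtr<U>;

  static constexpr pointer pointer_to(element_type& r) noexcept {
    return pointer(&r);
  }
  static constexpr element_type* to_address(pointer p) noexcept {
    return p.get();
  }
};
}  // namespace std

int main() {
  int x = 42;
  WrappedPtr<int> p(&x);
  // std::to_address consults pointer_traits<WrappedPtr<int>>::to_address.
  std::cout << *std::to_address(p) << "\n";  // prints 42
}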


@ -12,34 +12,13 @@
namespace base::internal { namespace base::internal {
PA_NO_SANITIZE("address") PA_NO_SANITIZE("address")
bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr) { bool RawPtrAsanUnownedImpl::EndOfAliveAllocation(const volatile void* ptr) {
uintptr_t address = reinterpret_cast<uintptr_t>(ptr); uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1) &&
// Normally, we probe the first byte of an object, but in cases of pointer
// arithmetic, we may be probing subsequent bytes, including the legal
// "end + 1" position.
//
// Alas, ASAN will claim an unmapped page is unpoisoned, so willfully ignore
// the fist address of a page, since "end + 1" of an object allocated exactly
// up to a page boundary will SEGV on probe. This will cause false negatives
// for pointers that happen to be page aligned, which is undesirable but
// necessary for now.
//
// We minimize the consequences by using the pointer arithmetic flag in
// higher levels to conditionalize this suppression.
//
// TODO(tsepez): this may still fail for a non-accessible but non-null
// return from, say, malloc(0) which happens to be page-aligned.
//
// TODO(tsepez): enforce the pointer arithmetic flag. Until then, we
// may fail here if a pointer requires the flag but is lacking it.
return is_adjustable_ptr &&
((address & 0x0fff) == 0 ||
__asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) &&
!__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1); !__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
} }
bool LikelySmuggledScalar(const volatile void* ptr) { bool RawPtrAsanUnownedImpl::LikelySmuggledScalar(const volatile void* ptr) {
intptr_t address = reinterpret_cast<intptr_t>(ptr); intptr_t address = reinterpret_cast<intptr_t>(ptr);
return address < 0x4000; // Negative or small positive. return address < 0x4000; // Negative or small positive.
} }
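
On the 112 side the probe above gained extra handling for page-aligned "end + 1" pointers and a pointer-arithmetic flag, but the core redzone check is unchanged. A self-contained sketch of that check (build with -fsanitize=address for it to report anything; without ASan it degrades to returning false):

#include <cstdint>
#include <iostream>

#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define DEMO_HAS_ASAN 1
#endif
#elif defined(__SANITIZE_ADDRESS__)
#define DEMO_HAS_ASAN 1
#endif

#if defined(DEMO_HAS_ASAN)
#include <sanitizer/asan_interface.h>
#endif

bool EndOfAliveAllocation(const volatile void* ptr) {
#if defined(DEMO_HAS_ASAN)
  // Poisoned at `ptr` (the redzone) but unpoisoned just before it: this is
  // the "end + 1" position of a live allocation, not a dangling pointer.
  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  return __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1) &&
         !__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
#else
  (void)ptr;
  return false;  // Without ASan there are no redzones to consult.
#endif
}

int main() {
  int* arr = new int[4];
  std::cout << EndOfAliveAllocation(arr + 4) << "\n";  // 1 under ASan
  std::cout << EndOfAliveAllocation(arr + 2) << "\n";  // 0
  delete[] arr;
}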


@ -19,10 +19,6 @@
namespace base::internal { namespace base::internal {
bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr);
bool LikelySmuggledScalar(const volatile void* ptr);
template <bool IsAdjustablePtr>
struct RawPtrAsanUnownedImpl { struct RawPtrAsanUnownedImpl {
// Wraps a pointer. // Wraps a pointer.
template <typename T> template <typename T>
@ -95,11 +91,14 @@ struct RawPtrAsanUnownedImpl {
template <typename T> template <typename T>
static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) { static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) {
if (wrapped_ptr && !LikelySmuggledScalar(wrapped_ptr) && if (wrapped_ptr && !LikelySmuggledScalar(wrapped_ptr) &&
!EndOfAliveAllocation(wrapped_ptr, IsAdjustablePtr)) { !EndOfAliveAllocation(wrapped_ptr)) {
reinterpret_cast<const volatile uint8_t*>(wrapped_ptr)[0]; reinterpret_cast<const volatile uint8_t*>(wrapped_ptr)[0];
} }
} }
static bool EndOfAliveAllocation(const volatile void* ptr);
static bool LikelySmuggledScalar(const volatile void* ptr);
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor. // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T> template <typename T>


@ -13,7 +13,6 @@
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -134,7 +133,7 @@ struct RawPtrBackupRefImpl {
#endif #endif
AcquireInternal(address); AcquireInternal(address);
} else { } else {
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if PA_HAS_BUILTIN(__builtin_constant_p) #if PA_HAS_BUILTIN(__builtin_constant_p)
// Similarly to `IsSupportedAndNotNull` above, elide the // Similarly to `IsSupportedAndNotNull` above, elide the
// `BanSuperPageFromBRPPool` call if the compiler can prove that `address` // `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
@ -149,7 +148,7 @@ struct RawPtrBackupRefImpl {
partition_alloc::internal::AddressPoolManagerBitmap:: partition_alloc::internal::AddressPoolManagerBitmap::
BanSuperPageFromBRPPool(address); BanSuperPageFromBRPPool(address);
} }
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
} }
return ptr; return ptr;


@ -1,76 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
// Struct intended to be used with designated initializers and passed
// to the `CountersMatch()` matcher.
//
// `CountingImplType` isn't used directly; it tells the `CountersMatch`
// matcher which impl's static members should be checked.
template <typename CountingImplType>
struct CountingRawPtrExpectations {
absl::optional<int> wrap_raw_ptr_cnt;
absl::optional<int> release_wrapped_ptr_cnt;
absl::optional<int> get_for_dereference_cnt;
absl::optional<int> get_for_extraction_cnt;
absl::optional<int> get_for_comparison_cnt;
absl::optional<int> wrapped_ptr_swap_cnt;
absl::optional<int> wrapped_ptr_less_cnt;
absl::optional<int> pointer_to_member_operator_cnt;
absl::optional<int> wrap_raw_ptr_for_dup_cnt;
absl::optional<int> get_for_duplication_cnt;
};
#define REPORT_UNEQUAL_RAW_PTR_COUNTER(member_name, CounterClassImpl) \
{ \
if (arg.member_name.has_value() && \
arg.member_name.value() != CounterClassImpl::member_name) { \
*result_listener << "Expected `" #member_name "` to be " \
<< arg.member_name.value() << " but got " \
<< CounterClassImpl::member_name << "; "; \
result = false; \
} \
}
#define REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CounterClassImpl) \
{ \
result = true; \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(release_wrapped_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_dereference_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_extraction_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_comparison_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_swap_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_less_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(pointer_to_member_operator_cnt, \
CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_for_dup_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_duplication_cnt, CounterClassImpl) \
}
// Matcher used with `CountingRawPtr`. Provides slightly shorter
// boilerplate for verifying counts. This inner function is detached
// from the `MATCHER` to isolate the templating.
template <typename CountingImplType>
bool CountersMatchImpl(const CountingRawPtrExpectations<CountingImplType>& arg,
testing::MatchResultListener* result_listener) {
bool result = true;
REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CountingImplType);
return result;
}
// Implicit `arg` has type `CountingRawPtrExpectations`, specialized for
// the specific counting impl.
MATCHER(CountersMatch, "counting impl has specified counters") {
return CountersMatchImpl(arg, result_listener);
}
#undef REPORT_UNEQUAL_RAW_PTR_COUNTERS
#undef REPORT_UNEQUAL_RAW_PTR_COUNTER
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
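For context, a minimal sketch of how the expectations struct and `CountersMatch()` above are typically combined in a test. The impl name `MyCountingImpl` is hypothetical; any type exposing static counters with the field names above would do, and fields left out of the designated initializer are simply not checked.

```cpp
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
// Assumes the CountingRawPtrExpectations / CountersMatch declarations above
// are visible in this translation unit.

// Hypothetical counting impl; its static members mirror the field names above.
struct MyCountingImpl {
  static inline int wrap_raw_ptr_cnt = 0;
  static inline int release_wrapped_ptr_cnt = 0;
  static inline int get_for_dereference_cnt = 0;
  static inline int get_for_extraction_cnt = 0;
  static inline int get_for_comparison_cnt = 0;
  static inline int wrapped_ptr_swap_cnt = 0;
  static inline int wrapped_ptr_less_cnt = 0;
  static inline int pointer_to_member_operator_cnt = 0;
  static inline int wrap_raw_ptr_for_dup_cnt = 0;
  static inline int get_for_duplication_cnt = 0;
};

TEST(RawPtrCountingSketch, DereferenceOnce) {
  // ... exercise a counting raw_ptr here so the impl bumps its counters ...
  EXPECT_THAT((CountingRawPtrExpectations<MyCountingImpl>{
                  .get_for_dereference_cnt = 0,
                  .get_for_extraction_cnt = 0,
              }),
              CountersMatch());
}
```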

View File

@ -17,7 +17,7 @@
namespace base { namespace base {
template <class T, RawPtrTraits Traits> template <class T, typename Traits>
class raw_ref; class raw_ref;
namespace internal { namespace internal {
@ -25,7 +25,7 @@ namespace internal {
template <class T> template <class T>
struct is_raw_ref : std::false_type {}; struct is_raw_ref : std::false_type {};
template <class T, RawPtrTraits Traits> template <class T, typename Traits>
struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {}; struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {};
template <class T> template <class T>
@ -53,7 +53,7 @@ constexpr inline bool is_raw_ref_v = is_raw_ref<T>::value;
// Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed // Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed
// independent of the underlying `T`, similar to `std::reference_wrapper`. That // independent of the underlying `T`, similar to `std::reference_wrapper`. That
// means the reference inside it can be moved and reassigned. // means the reference inside it can be moved and reassigned.
template <class T, RawPtrTraits Traits = RawPtrTraits::kEmpty> template <class T, typename Traits = raw_ptr_traits::TraitBundle<>>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref { class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// operator* is used with the expectation of GetForExtraction semantics: // operator* is used with the expectation of GetForExtraction semantics:
// //
@ -63,7 +63,9 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// The implementation of operator* provides GetForDereference semantics, and // The implementation of operator* provides GetForDereference semantics, and
// this results in spurious crashes in BRP-ASan builds, so we need to disable // this results in spurious crashes in BRP-ASan builds, so we need to disable
// hooks that provide BRP-ASan instrumentation for raw_ref. // hooks that provide BRP-ASan instrumentation for raw_ref.
using Inner = raw_ptr<T, Traits | RawPtrTraits::kDisableHooks>; using Inner = raw_ptr<
T,
typename Traits::template AddTraitT<raw_ptr_traits::DisableHooks>>;
public: public:
using Impl = typename Inner::Impl; using Impl = typename Inner::Impl;
@ -79,8 +81,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
internal::MTECheckedPtrImplPartitionAllocSupport>> || internal::MTECheckedPtrImplPartitionAllocSupport>> ||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) #endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR) #if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> || std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR) #endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrNoOpImpl>; std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
@ -97,24 +98,24 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
raw_ref& operator=(const T&& p) = delete; raw_ref& operator=(const T&& p) = delete;
PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) { PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
} }
PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) { PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
} }
} }
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
@ -127,14 +128,14 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
: inner_(p.inner_) { : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
} }
// Deliberately implicit in order to support implicit upcast. // Deliberately implicit in order to support implicit upcast.
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
: inner_(std::move(p.inner_)) { : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
} }
@ -148,13 +149,13 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// Upcast assignment // Upcast assignment
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>> template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
@ -163,7 +164,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
PA_ALWAYS_INLINE T& operator*() const { PA_ALWAYS_INLINE T& operator*() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
return inner_.operator*(); return inner_.operator*();
} }
@ -172,12 +173,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// used in place of operator*() when the memory referred to by the reference // used in place of operator*() when the memory referred to by the reference
// is not immediately going to be accessed. // is not immediately going to be accessed.
PA_ALWAYS_INLINE T& get() const { PA_ALWAYS_INLINE T& get() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
return *inner_.get(); return *inner_.get();
} }
PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL { PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
return inner_.operator->(); return inner_.operator->();
} }
@ -190,142 +191,123 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept { friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
swap(lhs.inner_, rhs.inner_); swap(lhs.inner_, rhs.inner_);
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <class U>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<U, Traits>& rhs) {
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs, PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
const raw_ref<V, Traits2>& rhs); return lhs.inner_ == rhs.inner_;
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> }
friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs, template <class U>
const raw_ref<V, Traits2>& rhs); friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs,
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> const raw_ref<U, Traits>& rhs) {
friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs, PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
const raw_ref<V, Traits2>& rhs); PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> return lhs.inner_ != rhs.inner_;
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs, }
const raw_ref<V, Traits2>& rhs); template <class U>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs,
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs, const raw_ref<U, Traits>& rhs) {
const raw_ref<V, Traits2>& rhs); PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ == &rhs; return lhs.inner_ == &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ != &rhs; return lhs.inner_ != &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ < &rhs; return lhs.inner_ < &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ > &rhs; return lhs.inner_ > &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ <= &rhs; return lhs.inner_ <= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ >= &rhs; return lhs.inner_ >= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs == rhs.inner_; return &lhs == rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs != rhs.inner_; return &lhs != rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs < rhs.inner_; return &lhs < rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs > rhs.inner_; return &lhs > rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs <= rhs.inner_; return &lhs <= rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return &lhs >= rhs.inner_; return &lhs >= rhs.inner_;
} }
private: private:
template <class U, RawPtrTraits R> template <class U, typename R>
friend class raw_ref; friend class raw_ref;
Inner inner_; Inner inner_;
}; };
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ == rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ != rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
// CTAD deduction guide. // CTAD deduction guide.
template <class T> template <class T>
raw_ref(T&) -> raw_ref<T>; raw_ref(T&) -> raw_ref<T>;
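To make the rebinding semantics and the deduction guide above concrete, a small illustrative sketch:

```cpp
#include "base/allocator/partition_allocator/pointers/raw_ref.h"

void RawRefSketch() {
  int a = 1;
  int b = 2;
  raw_ref ref(a);    // CTAD deduces raw_ref<int>.
  *ref = 10;         // Writes through to `a`.
  ref = raw_ref(b);  // Rebinds the wrapper itself, unlike a native int&.
  *ref = 20;         // Writes through to `b`; `a` keeps the value 10.
}
```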
@ -336,7 +318,7 @@ raw_ref(const T&) -> raw_ref<const T>;
template <typename T> template <typename T>
struct IsRawRef : std::false_type {}; struct IsRawRef : std::false_type {};
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
struct IsRawRef<raw_ref<T, Traits>> : std::true_type {}; struct IsRawRef<raw_ref<T, Traits>> : std::true_type {};
template <typename T> template <typename T>
@ -347,7 +329,7 @@ struct RemoveRawRef {
using type = T; using type = T;
}; };
template <typename T, RawPtrTraits Traits> template <typename T, typename Traits>
struct RemoveRawRef<raw_ref<T, Traits>> { struct RemoveRawRef<raw_ref<T, Traits>> {
using type = T; using type = T;
}; };
@ -363,7 +345,7 @@ namespace std {
// Override so set/map lookups do not create an extra raw_ref. This also // Override so set/map lookups do not create an extra raw_ref. This also
// allows C++ references to be used for lookup. // allows C++ references to be used for lookup.
template <typename T, base::RawPtrTraits Traits> template <typename T, typename Traits>
struct less<raw_ref<T, Traits>> { struct less<raw_ref<T, Traits>> {
using Impl = typename raw_ref<T, Traits>::Impl; using Impl = typename raw_ref<T, Traits>::Impl;
using is_transparent = void; using is_transparent = void;
@ -385,37 +367,6 @@ struct less<raw_ref<T, Traits>> {
} }
}; };
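The transparent comparator above is what lets an associative container keyed on `raw_ref` be probed with a plain reference. A sketch, assuming the heterogeneous `operator()` overloads accept `T&` as the comment states:

```cpp
#include <set>

#include "base/allocator/partition_allocator/pointers/raw_ref.h"

struct Widget {
  int id = 0;
};

bool ContainsWidget(const std::set<raw_ref<Widget>>& widgets, Widget& w) {
  // `is_transparent` lets the lookup use `w` directly, without constructing
  // a temporary raw_ref<Widget>.
  return widgets.find(w) != widgets.end();
}
```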
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. The latter is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing the pointer_traits is the standard blessed way to customize
// `std::to_address(pointer)` in C++20 [1].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ref<T, Traits>> {
using pointer = ::raw_ref<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ref<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(r);
}
static constexpr element_type* to_address(pointer p) noexcept {
// `raw_ref::get` is used instead of `raw_ref::operator*`. It provides
// GetForExtraction rather than GetForDereference semantics (see
// raw_ptr.h). This should be used when we don't know whether the memory
// will be accessed.
return &(p.get());
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std } // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_
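With the `pointer_traits` specialization above in place (libc++ only), `std::to_address` can recover the underlying pointer without dereference-time checks; roughly:

```cpp
#include <memory>

#include "base/allocator/partition_allocator/pointers/raw_ref.h"

int* UnderlyingPointer(const raw_ref<int>& ref) {
  // Routes through pointer_traits<raw_ref<int>>::to_address, i.e. ref.get(),
  // so GetForExtraction rather than GetForDereference semantics apply.
  return std::to_address(ref);
}
```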

View File

@ -4,16 +4,14 @@
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
ReservationOffsetTable::_PaddedReservationOffsetTables ReservationOffsetTable::_PaddedReservationOffsetTables
ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN; ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else #else
ReservationOffsetTable::_ReservationOffsetTable ReservationOffsetTable::_ReservationOffsetTable
ReservationOffsetTable::reservation_offset_table_; ReservationOffsetTable::reservation_offset_table_;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -17,6 +17,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h" #include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -66,7 +67,7 @@ static constexpr uint16_t kOffsetTagNormalBuckets =
// granularity is kSuperPageSize. // granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable { class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
public: public:
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// There is one reservation offset table per Pool in 64-bit mode. // There is one reservation offset table per Pool in 64-bit mode.
static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize; static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
static constexpr size_t kReservationOffsetTableLength = static constexpr size_t kReservationOffsetTableLength =
@ -77,7 +78,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull; static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
static constexpr size_t kReservationOffsetTableLength = static constexpr size_t kReservationOffsetTableLength =
4 * kGiB / kSuperPageSize; 4 * kGiB / kSuperPageSize;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets, static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets."); "Offsets should be smaller than kOffsetTagNormalBuckets.");
@ -94,7 +95,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
offset = kOffsetTagNotAllocated; offset = kOffsetTagNotAllocated;
} }
}; };
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// If pkey support is enabled, we need to pkey-tag the tables of the pkey // If pkey support is enabled, we need to pkey-tag the tables of the pkey
// pool. For this, we need to pad the tables so that the pkey ones start on a // pool. For this, we need to pad the tables so that the pkey ones start on a
// page boundary. // page boundary.
@ -108,12 +109,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
#else #else
// A single table for the entire 32-bit address space. // A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_; static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
}; };
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) { PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools); PA_DCHECK(0 < handle && handle <= kNumPools);
return ReservationOffsetTable::padded_reservation_offset_tables_ return ReservationOffsetTable::padded_reservation_offset_tables_
.tables[handle - 1] .tables[handle - 1]
.offsets; .offsets;
@ -143,7 +144,7 @@ PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
ReservationOffsetTable::kReservationOffsetTableLength); ReservationOffsetTable::kReservationOffsetTableLength);
return GetReservationOffsetTable(pool) + table_index; return GetReservationOffsetTable(pool) + table_index;
} }
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) { PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
return ReservationOffsetTable::reservation_offset_table_.offsets; return ReservationOffsetTable::reservation_offset_table_.offsets;
} }
@ -153,10 +154,10 @@ PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
return ReservationOffsetTable::reservation_offset_table_.offsets + return ReservationOffsetTable::reservation_offset_table_.offsets +
ReservationOffsetTable::kReservationOffsetTableLength; ReservationOffsetTable::kReservationOffsetTableLength;
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) { PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// In 64-bit mode, find the owning Pool and compute the offset from its base. // In 64-bit mode, find the owning Pool and compute the offset from its base.
auto [pool, offset] = GetPoolAndOffset(address); auto [pool, offset] = GetPoolAndOffset(address);
return ReservationOffsetPointer(pool, offset); return ReservationOffsetPointer(pool, offset);
@ -199,13 +200,13 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON) #if BUILDFLAG(PA_DCHECK_IS_ON)
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
// inside another macro (PA_DCHECK). // inside another macro (PA_DCHECK).
#if !BUILDFLAG(HAS_64_BIT_POINTERS) #if !PA_CONFIG(HAS_64_BITS_POINTERS)
constexpr size_t kBRPOffset = constexpr size_t kBRPOffset =
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap; AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else #else
constexpr size_t kBRPOffset = 0ull; constexpr size_t kBRPOffset = 0ull;
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) #endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
// Make sure the reservation start is in the same pool as |address|. // Make sure the reservation start is in the same pool as |address|.
// In the 32-bit mode, the beginning of a reservation may be excluded // In the 32-bit mode, the beginning of a reservation may be excluded
// from the BRP pool, so shift the pointer. The other pools don't have // from the BRP pool, so shift the pointer. The other pools don't have
@ -226,7 +227,7 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
return reservation_start; return reservation_start;
} }
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// If the given address doesn't point to direct-map allocated memory, // If the given address doesn't point to direct-map allocated memory,
// returns 0. // returns 0.
// This variant has better performance than the regular one on 64-bit builds if // This variant has better performance than the regular one on 64-bit builds if
@ -246,7 +247,7 @@ GetDirectMapReservationStart(uintptr_t address,
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0); PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
return reservation_start; return reservation_start;
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
// Returns true if |address| is the beginning of the first super page of a // Returns true if |address| is the beginning of the first super page of a
// reservation, i.e. either a normal bucket super page, or the first super page // reservation, i.e. either a normal bucket super page, or the first super page

View File

@ -10,11 +10,12 @@
#include "base/allocator/partition_alloc_features.h" #include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/types/strong_alias.h" #include "base/types/strong_alias.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_CONFIG(ALLOW_PCSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif #endif
@ -207,7 +208,7 @@ BASE_EXPORT void ConfigurePartitions(
AddDummyRefCount add_dummy_ref_count, AddDummyRefCount add_dummy_ref_count,
AlternateBucketDistribution use_alternate_bucket_distribution); AlternateBucketDistribution use_alternate_bucket_distribution);
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig); BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
#endif #endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@ -22,6 +22,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h" #include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h" #include "base/allocator/partition_allocator/partition_stats.h"
@ -712,7 +713,7 @@ void ConfigurePartitions(
} }
} }
#if BUILDFLAG(USE_STARSCAN) #if PA_CONFIG(ALLOW_PCSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) { void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
partition_alloc::internal::base::PlatformThread::SetThreadNameHook( partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&::base::PlatformThread::SetName); &::base::PlatformThread::SetName);
@ -729,7 +730,7 @@ void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled(); base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled(); base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup. // Call this as soon as possible during startup.

View File

@ -30,14 +30,13 @@ ThreadSafePartitionRoot& PCScanMetadataAllocator() {
return *allocator; return *allocator;
} }
// TODO(tasak): investigate whether PartitionAlloc tests really need this
// function or not. If no tests need it, remove it.
void ReinitPCScanMetadataAllocatorForTesting() { void ReinitPCScanMetadataAllocatorForTesting() {
// First, purge memory owned by PCScanMetadataAllocator. // First, purge memory owned by PCScanMetadataAllocator.
PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans | PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages); PurgeFlags::kDiscardUnusedSystemPages);
// Then, reinit the allocator. // Then, reinit the allocator.
PCScanMetadataAllocator().ResetForTesting(true); // IN-TEST PCScanMetadataAllocator().~PartitionRoot();
memset(&PCScanMetadataAllocator(), 0, sizeof(PCScanMetadataAllocator()));
PCScanMetadataAllocator().Init(kConfig); PCScanMetadataAllocator().Init(kConfig);
} }

View File

@ -34,7 +34,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h" #include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h" #include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -614,14 +613,14 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr)); PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
// First, check if |maybe_ptr| points to a valid super page or a quarantined // First, check if |maybe_ptr| points to a valid super page or a quarantined
// card. // card.
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE) #if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Check if |maybe_ptr| points to a quarantined card. // Check if |maybe_ptr| points to a quarantined card.
if (PA_LIKELY( if (PA_LIKELY(
!QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) { !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
return nullptr; return nullptr;
} }
#else // PA_CONFIG(STARSCAN_USE_CARD_TABLE) #else
// Without the card table, use the reservation offset table to check if // Without the card table, use the reservation offset table to check if
// |maybe_ptr| points to a valid super-page. It's not as precise (meaning that // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
// we may have hit the slow path more frequently), but reduces the memory // we may have hit the slow path more frequently), but reduces the memory
@ -635,11 +634,11 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
return nullptr; return nullptr;
} }
#endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE) #endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // PA_CONFIG(HAS_64_BITS_POINTERS)
if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) { if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
return nullptr; return nullptr;
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
// We are certain here that |maybe_ptr| points to an allocated super-page. // We are certain here that |maybe_ptr| points to an allocated super-page.
return StateBitmapFromAddr(maybe_ptr); return StateBitmapFromAddr(maybe_ptr);
@ -778,14 +777,14 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
size_t quarantine_size() const { return quarantine_size_; } size_t quarantine_size() const { return quarantine_size_; }
private: private:
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() { PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
return PartitionAddressSpace::RegularPoolBase(); return PartitionAddressSpace::RegularPoolBase();
} }
PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() { PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
return PartitionAddressSpace::RegularPoolBaseMask(); return PartitionAddressSpace::RegularPoolBaseMask();
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // PA_CONFIG(HAS_64_BITS_POINTERS)
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) { PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
// |maybe_ptr| may have an MTE tag, so remove it first. // |maybe_ptr| may have an MTE tag, so remove it first.
@ -1290,7 +1289,7 @@ PCScanInternal::~PCScanInternal() = default;
void PCScanInternal::Initialize(PCScan::InitConfig config) { void PCScanInternal::Initialize(PCScan::InitConfig config) {
PA_DCHECK(!is_initialized_); PA_DCHECK(!is_initialized_);
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// Make sure that pools are initialized. // Make sure that pools are initialized.
PartitionAddressSpace::Init(); PartitionAddressSpace::Init();
#endif #endif

View File

@ -9,7 +9,6 @@
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h" #include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
@ -94,12 +93,12 @@ template <typename Derived>
void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) { void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
PA_SCAN_DCHECK(!(end % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
// If the read value is a pointer into the PA region, it's likely // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency. // MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask; const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
const uintptr_t base = Derived::RegularPoolBase(); const uintptr_t base = Derived::RegularPoolBase();
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
for (; begin < end; begin += sizeof(uintptr_t)) { for (; begin < end; begin += sizeof(uintptr_t)) {
// Read the region word-by-word. Everything that we read is a potential // Read the region word-by-word. Everything that we read is a potential
// pointer to or inside an object on the heap. Such an object should be // pointer to or inside an object on the heap. Such an object should be
@ -107,13 +106,13 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
// //
// Keep it MTE-untagged. See DisableMTEScope for details. // Keep it MTE-untagged. See DisableMTEScope for details.
const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin); const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin);
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
if (PA_LIKELY((maybe_ptr & mask) != base)) if (PA_LIKELY((maybe_ptr & mask) != base))
continue; continue;
#else #else
if (!maybe_ptr) if (!maybe_ptr)
continue; continue;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
derived().CheckPointer(maybe_ptr); derived().CheckPointer(maybe_ptr);
} }
} }
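The fast-path filter in the loop above is a single mask-and-compare per word. A standalone sketch of the same idea, with made-up pool constants (the real base and mask come from PartitionAddressSpace; assumes a 64-bit uintptr_t):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical pool: a 16 GiB-aligned region starting at 2^46. Illustration only.
constexpr uintptr_t kPoolBase = uintptr_t{1} << 46;
constexpr uintptr_t kPoolMask = ~((uintptr_t{1} << 34) - 1);

// Counts the words in [begin, end) that look like pointers into the pool.
size_t CountCandidatePointers(const uintptr_t* begin, const uintptr_t* end) {
  size_t candidates = 0;
  for (const uintptr_t* p = begin; p != end; ++p) {
    if ((*p & kPoolMask) == kPoolBase)  // Same cheap filter as the scan loop.
      ++candidates;
  }
  return candidates;
}
```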

View File

@ -8,7 +8,6 @@
#include <limits> #include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -136,12 +135,12 @@ namespace {
} // namespace } // namespace
void Stack::IteratePointers(StackVisitor* visitor) const { void Stack::IteratePointers(StackVisitor* visitor) const {
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED) #if defined(PA_PCSCAN_STACK_SUPPORTED)
PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl); PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by // No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration. // the regular conservative stack iteration.
IterateSafeStackIfNecessary(visitor); IterateSafeStackIfNecessary(visitor);
#endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED) #endif
} }
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -355,7 +355,7 @@ void ThreadCache::RemoveTombstoneForTesting() {
// static // static
void ThreadCache::Init(PartitionRoot<>* root) { void ThreadCache::Init(PartitionRoot<>* root) {
#if BUILDFLAG(IS_NACL) #if BUILDFLAG(IS_NACL)
static_assert(false, "PartitionAlloc isn't supported for NaCl"); PA_IMMEDIATE_CRASH();
#endif #endif
PA_CHECK(root->buckets[kBucketCount - 1].slot_size == PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
ThreadCache::kLargeSizeThreshold); ThreadCache::kLargeSizeThreshold);

View File

@ -27,7 +27,7 @@
#include "base/allocator/partition_allocator/partition_tls.h" #include "base/allocator/partition_allocator/partition_tls.h"
#include "build/build_config.h" #include "build/build_config.h"
#if defined(ARCH_CPU_X86_64) && BUILDFLAG(HAS_64_BIT_POINTERS) #if defined(ARCH_CPU_X86_64) && PA_CONFIG(HAS_64_BITS_POINTERS)
#include <algorithm> #include <algorithm>
#endif #endif
@ -43,13 +43,13 @@ namespace tools {
// //
// These two values were chosen randomly, and in particular neither is a valid // These two values were chosen randomly, and in particular neither is a valid
// pointer on most 64 bit architectures. // pointer on most 64 bit architectures.
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if PA_CONFIG(HAS_64_BITS_POINTERS)
constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63; constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf; constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
#else #else
constexpr uintptr_t kNeedle1 = 0xe69e32f3; constexpr uintptr_t kNeedle1 = 0xe69e32f3;
constexpr uintptr_t kNeedle2 = 0x9615ee1c; constexpr uintptr_t kNeedle2 = 0x9615ee1c;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif
// This array contains, in order: // This array contains, in order:
// - kNeedle1 // - kNeedle1
@ -161,8 +161,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
internal::base::TimeDelta periodic_purge_next_interval_ = internal::base::TimeDelta periodic_purge_next_interval_ =
kDefaultPurgeInterval; kDefaultPurgeInterval;
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
uint8_t largest_active_bucket_index_ = 1;
#else
uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex( uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
ThreadCacheLimits::kDefaultSizeThreshold); ThreadCacheLimits::kDefaultSizeThreshold);
#endif
}; };
constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default; constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
@ -386,9 +392,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size); void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);
static void SetGlobalLimits(PartitionRoot<>* root, float multiplier); static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
static constexpr uint16_t kBucketCount = 1;
#else
static constexpr uint16_t kBucketCount = static constexpr uint16_t kBucketCount =
internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) + internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
1; 1;
#endif
static_assert( static_assert(
kBucketCount < internal::kNumBuckets, kBucketCount < internal::kNumBuckets,
"Cannot have more cached buckets than what the allocator supports"); "Cannot have more cached buckets than what the allocator supports");
@ -535,7 +547,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
internal::PartitionFreelistEntry* entry = bucket.freelist_head; internal::PartitionFreelistEntry* entry = bucket.freelist_head;
// TODO(lizeb): Consider removing once crbug.com/1382658 is fixed. // TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \ #if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
BUILDFLAG(HAS_64_BIT_POINTERS) PA_CONFIG(HAS_64_BITS_POINTERS)
// x86_64 architecture now supports 57 bits of address space, as of Ice Lake // x86_64 architecture now supports 57 bits of address space, as of Ice Lake
// for Intel. However Chrome OS systems do not ship with kernel support for // for Intel. However Chrome OS systems do not ship with kernel support for
// it, but with 48 bits, so all canonical addresses have the upper 16 bits // it, but with 48 bits, so all canonical addresses have the upper 16 bits
@ -543,8 +555,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// by the kernel). // by the kernel).
constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1; constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask)); PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
#endif // BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && #endif
// BUILDFLAG(HAS_64_BIT_POINTERS)
// Passes the bucket size to |GetNext()|, so that in case of freelist // Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that led to the crash, helping to // corruption, we know the bucket size that led to the crash, helping to
@ -567,7 +578,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket, PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
uintptr_t slot_start) { uintptr_t slot_start) {
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \ #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
BUILDFLAG(HAS_64_BIT_POINTERS) PA_CONFIG(HAS_64_BITS_POINTERS)
// We see freelist corruption crashes happening in the wild. These are likely // We see freelist corruption crashes happening in the wild. These are likely
// due to out-of-bounds accesses in the previous slot, or to a Use-After-Free // due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
// somewhere in the code. // somewhere in the code.
@ -619,7 +630,7 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
address_aligned += 4; address_aligned += 4;
} }
#endif // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && #endif // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
// BUILDFLAG(HAS_64_BIT_POINTERS) // PA_CONFIG(HAS_64_BITS_POINTERS)
auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache( auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
slot_start, bucket.freelist_head); slot_start, bucket.freelist_head);

View File

@ -14,12 +14,13 @@
// other hyper-thread on this core. See the following for context: // other hyper-thread on this core. See the following for context:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops // https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
#if PA_CONFIG(IS_NONCLANG_MSVC) #if BUILDFLAG(IS_NACL)
// Inline assembly not allowed.
#define PA_YIELD_PROCESSOR ((void)0)
#elif PA_CONFIG(IS_NONCLANG_MSVC)
// MSVC is in its own assemblyless world (crbug.com/1351310#c6). // MSVC is in its own assemblyless world (crbug.com/1351310#c6).
#include <windows.h> #include <windows.h>
#define PA_YIELD_PROCESSOR (YieldProcessor()) #define PA_YIELD_PROCESSOR (YieldProcessor())
#else #else
#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86) #if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
@ -46,6 +47,6 @@
#define PA_YIELD_PROCESSOR ((void)0) #define PA_YIELD_PROCESSOR ((void)0)
#endif #endif
#endif // PA_CONFIG(IS_NONCLANG_MSVC) #endif // BUILDFLAG(IS_NACL)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_
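Typical use of the macro is inside a short, bounded spin before falling back to a real wait primitive; a minimal sketch (the surrounding function is hypothetical):

```cpp
#include <atomic>

#include "base/allocator/partition_allocator/yield_processor.h"

// Spin briefly on `flag`, hinting the CPU each iteration, then give up so the
// caller can fall back to a futex/condition variable (not shown).
bool SpinUntilSet(const std::atomic<bool>& flag, int max_spins = 64) {
  for (int i = 0; i < max_spins; ++i) {
    if (flag.load(std::memory_order_acquire))
      return true;
    PA_YIELD_PROCESSOR;
  }
  return false;
}
```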

View File

@ -48,17 +48,12 @@ source_set("jni_sample_native_side") {
] ]
} }
generate_jni_registration("jni_registration") {
targets = [ ":jni_sample_java" ]
manual_jni_registration = true
}
shared_library("jni_sample_lib") { shared_library("jni_sample_lib") {
sources = [ "sample_entry_point.cc" ] sources = [ "sample_entry_point.cc" ]
deps = [ deps = [
":jni_registration",
":jni_sample_native_side", ":jni_sample_native_side",
":sample_jni_apk__final_jni", # For registration_header
"//base", "//base",
] ]
} }
@ -68,6 +63,7 @@ android_apk("sample_jni_apk") {
android_manifest = "AndroidManifest.xml" android_manifest = "AndroidManifest.xml"
deps = [ ":jni_sample_java" ] deps = [ ":jni_sample_java" ]
shared_libraries = [ ":jni_sample_lib" ] shared_libraries = [ ":jni_sample_lib" ]
manual_jni_registration = true
} }
# Serves to test that generated bindings compile properly. # Serves to test that generated bindings compile properly.

View File

@ -175,16 +175,6 @@ public class AnimationFrameTimeHistogramTest {
If a native method is called without setting a mock in a unit test, an If a native method is called without setting a mock in a unit test, an
`UnsupportedOperationException` will be thrown. `UnsupportedOperationException` will be thrown.
#### Special case: DFMs
DFMs have their own generated `GEN_JNI`s, named `<module_name>_GEN_JNI`. To make
your DFM's JNI use the `<module_name>` prefix, pass your module name as the
argument to the `@NativeMethods` annotation.
So, for example, say your module was named `test_module`. You would annotate
your `Natives` interface with `@NativeMethods("test_module")`, and this would
result in `test_module_GEN_JNI`.
### Testing for readiness: use `get()` ### Testing for readiness: use `get()`
JNI Generator automatically produces checks that verify that the Natives interface can be safely JNI Generator automatically produces checks that verify that the Natives interface can be safely

View File

@ -1,10 +1,13 @@
#!/usr/bin/env python3 #!/usr/bin/env python
# Copyright 2012 The Chromium Authors # Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
"""Extracts native methods from a Java file and generates the JNI bindings. """Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests.""" If you change this, please run and update the tests."""
from __future__ import print_function
import argparse import argparse
import base64 import base64
import collections import collections
@ -59,7 +62,7 @@ _EXTRACT_METHODS_REGEX = re.compile(
flags=re.DOTALL) flags=re.DOTALL)
_NATIVE_PROXY_EXTRACTION_REGEX = re.compile( _NATIVE_PROXY_EXTRACTION_REGEX = re.compile(
r'@NativeMethods(?:\(\s*"(?P<module_name>\w+)"\s*\))?[\S\s]+?interface\s*' r'@NativeMethods[\S\s]+?interface\s*'
r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})') r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})')
# Use 100 columns rather than 80 because it makes many lines more readable. # Use 100 columns rather than 80 because it makes many lines more readable.
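
For reference, a small self-contained demo of what the module-aware form of this regex (left-hand side) captures; the Java snippet below is a made-up input.

```python
# Standalone demo of the left-hand (module-aware) extraction regex above,
# run against a made-up Java snippet.
import re

_NATIVE_PROXY_EXTRACTION_REGEX = re.compile(
    r'@NativeMethods(?:\(\s*"(?P<module_name>\w+)"\s*\))?[\S\s]+?interface\s*'
    r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})')

java_source = '''
class FooJni {
    @NativeMethods("test_module")
    interface Natives {
        void destroy(long nativeFoo);
    }
}
'''

match = _NATIVE_PROXY_EXTRACTION_REGEX.search(java_source)
print(match.group('module_name'))     # test_module
print(match.group('interface_name'))  # Natives
print(match.group('interface_body'))  # { void destroy(long nativeFoo); }  (braces plus body)
```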
@ -839,12 +842,9 @@ class JNIFromJavaP(object):
self.constant_fields.append( self.constant_fields.append(
ConstantField(name=match.group('name'), value=value.group('value'))) ConstantField(name=match.group('name'), value=value.group('value')))
# We pass in an empty string for the module (which will make the JNI use the
# base module's files) for all javap-derived JNI. There may be a way to get
# the module from a jar file, but it's not needed right now.
self.inl_header_file_generator = InlHeaderFileGenerator( self.inl_header_file_generator = InlHeaderFileGenerator(
'', self.namespace, self.fully_qualified_class, [], self.namespace, self.fully_qualified_class, [], self.called_by_natives,
self.called_by_natives, self.constant_fields, self.jni_params, options) self.constant_fields, self.jni_params, options)
def GetContent(self): def GetContent(self):
return self.inl_header_file_generator.GetContent() return self.inl_header_file_generator.GetContent()
@ -875,21 +875,17 @@ class ProxyHelpers(object):
MAX_CHARS_FOR_HASHED_NATIVE_METHODS = 8 MAX_CHARS_FOR_HASHED_NATIVE_METHODS = 8
@staticmethod @staticmethod
def GetClass(short_name, name_prefix=None): def GetClass(use_hash):
if not name_prefix: return 'N' if use_hash else 'GEN_JNI'
name_prefix = ''
else:
name_prefix += '_'
return name_prefix + ('N' if short_name else 'GEN_JNI')
@staticmethod @staticmethod
def GetPackage(short_name): def GetPackage(use_hash):
return 'J' if short_name else 'org/chromium/base/natives' return 'J' if use_hash else 'org/chromium/base/natives'
@staticmethod @staticmethod
def GetQualifiedClass(short_name, name_prefix=None): def GetQualifiedClass(use_hash):
return '%s/%s' % (ProxyHelpers.GetPackage(short_name), return '%s/%s' % (ProxyHelpers.GetPackage(use_hash),
ProxyHelpers.GetClass(short_name, name_prefix)) ProxyHelpers.GetClass(use_hash))
@staticmethod @staticmethod
def CreateHashedMethodName(fully_qualified_class_name, method_name): def CreateHashedMethodName(fully_qualified_class_name, method_name):
@ -938,18 +934,8 @@ class ProxyHelpers(object):
ptr_type, ptr_type,
include_test_only=True): include_test_only=True):
methods = [] methods = []
first_match = True
module_name = None
for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents): for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents):
interface_body = match.group('interface_body') interface_body = match.group('interface_body')
if first_match:
module_name = match.group('module_name')
first_match = False
else:
assert module_name == match.group(
'module_name'
), 'JNI cannot belong to two modules in one file {} and {}'.format(
module_name, match.group('module_name'))
for method in _EXTRACT_METHODS_REGEX.finditer(interface_body): for method in _EXTRACT_METHODS_REGEX.finditer(interface_body):
name = method.group('name') name = method.group('name')
if not include_test_only and _NameIsTestOnly(name): if not include_test_only and _NameIsTestOnly(name):
@ -975,9 +961,7 @@ class ProxyHelpers(object):
ptr_type=ptr_type) ptr_type=ptr_type)
methods.append(native) methods.append(native)
if not module_name: return methods
module_name = ''
return methods, module_name
class JNIFromJavaSource(object): class JNIFromJavaSource(object):
@ -988,19 +972,20 @@ class JNIFromJavaSource(object):
self.jni_params = JniParams(fully_qualified_class) self.jni_params = JniParams(fully_qualified_class)
self.jni_params.ExtractImportsAndInnerClasses(contents) self.jni_params.ExtractImportsAndInnerClasses(contents)
jni_namespace = ExtractJNINamespace(contents) or options.namespace jni_namespace = ExtractJNINamespace(contents) or options.namespace
natives = ExtractNatives(contents, options.ptr_type)
called_by_natives = ExtractCalledByNatives(self.jni_params, contents, called_by_natives = ExtractCalledByNatives(self.jni_params, contents,
options.always_mangle) options.always_mangle)
natives, module_name = ProxyHelpers.ExtractStaticProxyNatives( natives += ProxyHelpers.ExtractStaticProxyNatives(fully_qualified_class,
fully_qualified_class, contents, options.ptr_type) contents,
natives += ExtractNatives(contents, options.ptr_type) options.ptr_type)
if len(natives) == 0 and len(called_by_natives) == 0: if len(natives) == 0 and len(called_by_natives) == 0:
raise SyntaxError( raise SyntaxError(
'Unable to find any JNI methods for %s.' % fully_qualified_class) 'Unable to find any JNI methods for %s.' % fully_qualified_class)
inl_header_file_generator = InlHeaderFileGenerator( inl_header_file_generator = InlHeaderFileGenerator(
module_name, jni_namespace, fully_qualified_class, natives, jni_namespace, fully_qualified_class, natives, called_by_natives, [],
called_by_natives, [], self.jni_params, options) self.jni_params, options)
self.content = inl_header_file_generator.GetContent() self.content = inl_header_file_generator.GetContent()
def GetContent(self): def GetContent(self):
@ -1020,13 +1005,11 @@ class HeaderFileGeneratorHelper(object):
def __init__(self, def __init__(self,
class_name, class_name,
module_name,
fully_qualified_class, fully_qualified_class,
use_proxy_hash, use_proxy_hash,
split_name=None, split_name=None,
enable_jni_multiplexing=False): enable_jni_multiplexing=False):
self.class_name = class_name self.class_name = class_name
self.module_name = module_name
self.fully_qualified_class = fully_qualified_class self.fully_qualified_class = fully_qualified_class
self.use_proxy_hash = use_proxy_hash self.use_proxy_hash = use_proxy_hash
self.split_name = split_name self.split_name = split_name
@ -1048,8 +1031,8 @@ class HeaderFileGeneratorHelper(object):
method_name = EscapeClassName(native.proxy_name) method_name = EscapeClassName(native.proxy_name)
return 'Java_%s_%s' % (EscapeClassName( return 'Java_%s_%s' % (EscapeClassName(
ProxyHelpers.GetQualifiedClass( ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing, self.use_proxy_hash
self.module_name)), method_name) or self.enable_jni_multiplexing)), method_name)
template = Template('Java_${JAVA_NAME}_native${NAME}') template = Template('Java_${JAVA_NAME}_native${NAME}')
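
A hedged sketch of how the stub symbol assembled above comes out. The `escape` helper is a simplified stand-in for `EscapeClassName` (it only covers the `/` and `_` rules of JNI name mangling), and both example names are invented.

```python
# Sketch of the 'Java_%s_%s' stub symbol shown above; escape() is a
# simplified stand-in for jni_generator.EscapeClassName and the class and
# method names are illustrative only.

def escape(name):
  return name.replace('_', '_1').replace('/', '_')

def stub_symbol(qualified_class, proxy_name):
  return 'Java_%s_%s' % (escape(qualified_class), escape(proxy_name))

# use_proxy_hash / multiplexing off: long GEN_JNI symbols.
print(stub_symbol('org/chromium/base/natives/GEN_JNI',
                  'org_chromium_foo_FooJni_destroy'))
# Java_org_chromium_base_natives_GEN_1JNI_org_1chromium_1foo_1FooJni_1destroy

# use_proxy_hash on: the short J/N class plus a made-up 8-character hashed name.
print(stub_symbol('J/N', 'MkH9vO24'))
# Java_J_N_MkH9vO24
```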
@ -1064,9 +1047,9 @@ class HeaderFileGeneratorHelper(object):
ret = collections.OrderedDict() ret = collections.OrderedDict()
for entry in origin: for entry in origin:
if isinstance(entry, NativeMethod) and entry.is_proxy: if isinstance(entry, NativeMethod) and entry.is_proxy:
short_name = self.use_proxy_hash or self.enable_jni_multiplexing use_hash = self.use_proxy_hash or self.enable_jni_multiplexing
ret[ProxyHelpers.GetClass(short_name, self.module_name)] \ ret[ProxyHelpers.GetClass(use_hash)] \
= ProxyHelpers.GetQualifiedClass(short_name, self.module_name) = ProxyHelpers.GetQualifiedClass(use_hash)
continue continue
ret[self.class_name] = self.fully_qualified_class ret[self.class_name] = self.fully_qualified_class
@ -1100,8 +1083,7 @@ const char kClassPath_${JAVA_CLASS}[] = \
# Since all proxy methods use the same class, defining this in every # Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations. # header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass( if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing, self.use_proxy_hash or self.enable_jni_multiplexing):
self.module_name):
ret += [template.substitute(values)] ret += [template.substitute(values)]
class_getter = """\ class_getter = """\
@ -1133,8 +1115,7 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
# Since all proxy methods use the same class, defining this in every # Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations. # header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass( if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing, self.use_proxy_hash or self.enable_jni_multiplexing):
self.module_name):
ret += [template.substitute(values)] ret += [template.substitute(values)]
return ''.join(ret) return ''.join(ret)
@ -1143,7 +1124,7 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
class InlHeaderFileGenerator(object): class InlHeaderFileGenerator(object):
"""Generates an inline header file for JNI integration.""" """Generates an inline header file for JNI integration."""
def __init__(self, module_name, namespace, fully_qualified_class, natives, def __init__(self, namespace, fully_qualified_class, natives,
called_by_natives, constant_fields, jni_params, options): called_by_natives, constant_fields, jni_params, options):
self.namespace = namespace self.namespace = namespace
self.fully_qualified_class = fully_qualified_class self.fully_qualified_class = fully_qualified_class
@ -1156,7 +1137,6 @@ class InlHeaderFileGenerator(object):
self.options = options self.options = options
self.helper = HeaderFileGeneratorHelper( self.helper = HeaderFileGeneratorHelper(
self.class_name, self.class_name,
module_name,
fully_qualified_class, fully_qualified_class,
self.options.use_proxy_hash, self.options.use_proxy_hash,
split_name=self.options.split_name, split_name=self.options.split_name,

View File

@ -12,17 +12,14 @@ file.
""" """
import collections import collections
import copy
import difflib import difflib
import inspect import inspect
import optparse import optparse
import os import os
import sys import sys
import tempfile
import unittest import unittest
import jni_generator import jni_generator
import jni_registration_generator import jni_registration_generator
import zipfile
from jni_generator import CalledByNative from jni_generator import CalledByNative
from jni_generator import IsMainDexJavaClass from jni_generator import IsMainDexJavaClass
from jni_generator import NativeMethod from jni_generator import NativeMethod
@ -47,7 +44,7 @@ def _RemoveHashedNames(natives):
return ret return ret
class JniGeneratorOptions(object): class TestOptions(object):
"""The mock options object which is passed to the jni_generator.py script.""" """The mock options object which is passed to the jni_generator.py script."""
def __init__(self): def __init__(self):
@ -57,6 +54,7 @@ class JniGeneratorOptions(object):
self.ptr_type = 'long' self.ptr_type = 'long'
self.cpp = 'cpp' self.cpp = 'cpp'
self.javap = 'mock-javap' self.javap = 'mock-javap'
self.native_exports_optional = True
self.enable_profiling = False self.enable_profiling = False
self.enable_tracing = False self.enable_tracing = False
self.use_proxy_hash = False self.use_proxy_hash = False
@ -67,21 +65,6 @@ class JniGeneratorOptions(object):
self.include_test_only = True self.include_test_only = True
class JniRegistrationGeneratorOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
self.sources_exclusions = []
self.namespace = None
self.enable_proxy_mocks = False
self.require_mocks = False
self.use_proxy_hash = False
self.enable_jni_multiplexing = False
self.manual_jni_registration = False
self.include_test_only = False
self.header_path = None
class BaseTest(unittest.TestCase): class BaseTest(unittest.TestCase):
@staticmethod @staticmethod
@ -117,42 +100,10 @@ class BaseTest(unittest.TestCase):
signature_to_cases[signature].extend(cases) signature_to_cases[signature].extend(cases)
combined_dict[ combined_dict[
'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls( 'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls(
signature_to_cases, '') signature_to_cases, namespace)
return combined_dict return combined_dict
def _TestEndToEndRegistration(self,
input_java_src_files,
options,
name_to_goldens,
header_golden=None):
with tempfile.TemporaryDirectory() as tdir:
options.srcjar_path = os.path.join(tdir, 'srcjar.jar')
if header_golden:
options.header_path = os.path.join(tdir, 'header.h')
input_java_paths = [
self._JoinScriptDir(os.path.join(_JAVA_SRC_DIR, f))
for f in input_java_src_files
]
jni_registration_generator._Generate(options, input_java_paths)
with zipfile.ZipFile(options.srcjar_path, 'r') as srcjar:
for name in srcjar.namelist():
self.assertTrue(
name in name_to_goldens,
f'Found {name} output, but not present in name_to_goldens map.')
contents = srcjar.read(name).decode('utf-8')
self.AssertGoldenTextEquals(contents,
golden_file=name_to_goldens[name])
if header_golden:
with open(options.header_path, 'r') as f:
# Temp directory will cause some diffs each time we run if we don't
# normalize.
contents = f.read().replace(
tdir.replace('/', '_').upper(), 'TEMP_DIR')
self.AssertGoldenTextEquals(contents, golden_file=header_golden)
def _JoinScriptDir(self, path): def _JoinScriptDir(self, path):
script_dir = os.path.dirname(sys.argv[0]) script_dir = os.path.dirname(sys.argv[0])
return os.path.join(script_dir, path) return os.path.join(script_dir, path)
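
The removed end-to-end helper above boils down to unpacking the generated srcjar and diffing each entry against a golden file. Here is a trimmed sketch under that assumption; `read_golden` is a hypothetical callback, not a real helper in this file.

```python
# Standalone sketch of the srcjar-vs-golden check performed by the removed
# _TestEndToEndRegistration helper above; paths and the golden map are
# placeholders.
import zipfile

def check_srcjar_against_goldens(srcjar_path, name_to_goldens, read_golden):
  """Asserts that every entry in the srcjar matches its golden file."""
  with zipfile.ZipFile(srcjar_path, 'r') as srcjar:
    for name in srcjar.namelist():
      assert name in name_to_goldens, (
          'Found %s output, but not present in name_to_goldens map.' % name)
      contents = srcjar.read(name).decode('utf-8')
      assert contents == read_golden(name_to_goldens[name])

# Example map, matching the golden names used by the removed tests:
# {'org/chromium/base/natives/GEN_JNI.java':
#      'SampleForAnnotationProcessorGenJni.golden'}
```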
@ -172,7 +123,7 @@ class BaseTest(unittest.TestCase):
content = f.read() content = f.read()
opts = options opts = options
if opts is None: if opts is None:
opts = JniGeneratorOptions() opts = TestOptions()
jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz, jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz,
opts) opts)
@ -241,8 +192,8 @@ class BaseTest(unittest.TestCase):
if golden_file is None: if golden_file is None:
self.assertTrue( self.assertTrue(
caller.startswith('test'), caller.startswith('test'),
'AssertGoldenTextEquals can only be called without at golden file ' 'AssertGoldenTextEquals can only be called from a '
'from a test* method, not %s' % caller) 'test* method, not %s' % caller)
golden_file = '%s%s.golden' % (caller, suffix) golden_file = '%s%s.golden' % (caller, suffix)
golden_text = self._ReadGoldenFile(golden_file) golden_text = self._ReadGoldenFile(golden_file)
if os.environ.get(_REBASELINE_ENV): if os.environ.get(_REBASELINE_ENV):
@ -258,7 +209,6 @@ class BaseTest(unittest.TestCase):
self.AssertTextEquals(golden_text, generated_text) self.AssertTextEquals(golden_text, generated_text)
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class TestGenerator(BaseTest): class TestGenerator(BaseTest):
def testInspectCaller(self): def testInspectCaller(self):
@ -425,21 +375,21 @@ class TestGenerator(BaseTest):
java_class_name=None) java_class_name=None)
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
h1 = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', h1 = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(h1.GetContent()) self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(), h2 = jni_registration_generator.HeaderGenerator('',
'', '', '',
'org/chromium/TestJni', 'org/chromium/TestJni',
natives, jni_params, natives,
True) jni_params,
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_options, '', content), content, use_hash=False, manual_jni_registration=True),
suffix='Registrations') suffix='Registrations')
def testInnerClassNatives(self): def testInnerClassNatives(self):
@ -460,9 +410,9 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesMultiple(self): def testInnerClassNativesMultiple(self):
@ -493,9 +443,9 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesBothInnerAndOuter(self): def testInnerClassNativesBothInnerAndOuter(self):
@ -525,22 +475,22 @@ class TestGenerator(BaseTest):
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('') jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params, natives, [], [], jni_params,
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(), h2 = jni_registration_generator.HeaderGenerator('',
'', '', '',
'org/chromium/TestJni', 'org/chromium/TestJni',
natives, jni_params, natives,
True) jni_params,
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_options, '', content), content, use_hash=False, manual_jni_registration=True),
suffix='Registrations') suffix='Registrations')
def testCalledByNatives(self): def testCalledByNatives(self):
@ -889,9 +839,9 @@ class TestGenerator(BaseTest):
), ),
] ]
self.AssertListEquals(golden_called_by_natives, called_by_natives) self.AssertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', [], h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [],
called_by_natives, [], jni_params, called_by_natives, [], jni_params,
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testCalledByNativeParseError(self): def testCalledByNativeParseError(self):
@ -988,8 +938,8 @@ public abstract class java.util.HashSet<T> extends java.util.AbstractSet<E>
Signature: ([Landroid/icu/text/DisplayContext;)V Signature: ([Landroid/icu/text/DisplayContext;)V
} }
""" """
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), jni_from_javap = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) contents.split('\n'), TestOptions())
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testSnippnetJavap6_7_8(self): def testSnippnetJavap6_7_8(self):
@ -1014,12 +964,12 @@ public class java.util.HashSet {
} }
""" """
jni_from_javap6 = jni_generator.JNIFromJavaP(content_javap6.split('\n'), jni_from_javap6 = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) content_javap6.split('\n'), TestOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP(content_javap7.split('\n'), jni_from_javap7 = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) content_javap7.split('\n'), TestOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP(content_javap8.split('\n'), jni_from_javap8 = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) content_javap8.split('\n'), TestOptions())
self.assertTrue(jni_from_javap6.GetContent()) self.assertTrue(jni_from_javap6.GetContent())
self.assertTrue(jni_from_javap7.GetContent()) self.assertTrue(jni_from_javap7.GetContent())
self.assertTrue(jni_from_javap8.GetContent()) self.assertTrue(jni_from_javap8.GetContent())
@ -1033,16 +983,16 @@ public class java.util.HashSet {
def testFromJavaP(self): def testFromJavaP(self):
contents = self._ReadGoldenFile('testInputStream.javap') contents = self._ReadGoldenFile('testInputStream.javap')
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), jni_from_javap = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) contents.split('\n'), TestOptions())
self.assertEqual(10, len(jni_from_javap.called_by_natives)) self.assertEqual(10, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testConstantsFromJavaP(self): def testConstantsFromJavaP(self):
for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']: for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']:
contents = self._ReadGoldenFile(f) contents = self._ReadGoldenFile(f)
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), jni_from_javap = jni_generator.JNIFromJavaP(
JniGeneratorOptions()) contents.split('\n'), TestOptions())
self.assertEqual(86, len(jni_from_javap.called_by_natives)) self.assertEqual(86, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent()) self.AssertGoldenTextEquals(jni_from_javap.GetContent())
@ -1063,8 +1013,8 @@ public class java.util.HashSet {
private native void nativeSyncSetupEnded( private native void nativeSyncSetupEnded(
int nativeAndroidSyncSetupFlowHandler); int nativeAndroidSyncSetupFlowHandler);
""" """
jni_from_java = jni_generator.JNIFromJavaSource(test_data, 'foo/bar', jni_from_java = jni_generator.JNIFromJavaSource(
JniGeneratorOptions()) test_data, 'foo/bar', TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testRaisesOnNonJNIMethod(self): def testRaisesOnNonJNIMethod(self):
@ -1075,7 +1025,7 @@ public class java.util.HashSet {
} }
""" """
self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data, self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data,
'foo/bar', JniGeneratorOptions()) 'foo/bar', TestOptions())
def testJniSelfDocumentingExample(self): def testJniSelfDocumentingExample(self):
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
@ -1095,7 +1045,7 @@ public class java.util.HashSet {
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, ('com/google/lookhowextremelylongiam/snarf/' test_data, ('com/google/lookhowextremelylongiam/snarf/'
'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'), 'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'),
JniGeneratorOptions()) TestOptions())
jni_lines = jni_from_java.GetContent().split('\n') jni_lines = jni_from_java.GetContent().split('\n')
line = next( line = next(
line for line in jni_lines if line.lstrip().startswith('#ifndef')) line for line in jni_lines if line.lstrip().startswith('#ifndef'))
@ -1163,7 +1113,7 @@ class Foo {
jni_params.JavaToJni('java/nio/ByteBuffer[]')) jni_params.JavaToJni('java/nio/ByteBuffer[]'))
def testNativesLong(self): def testNativesLong(self):
test_options = JniGeneratorOptions() test_options = TestOptions()
test_options.ptr_type = 'long' test_options.ptr_type = 'long'
test_data = """" test_data = """"
private native void nativeDestroy(long nativeChromeBrowserProvider); private native void nativeDestroy(long nativeChromeBrowserProvider);
@ -1181,9 +1131,8 @@ class Foo {
ptr_type=test_options.ptr_type), ptr_type=test_options.ptr_type),
] ]
self.AssertListEquals(golden_natives, natives) self.AssertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', h = jni_generator.InlHeaderFileGenerator(
natives, [], [], jni_params, '', 'org/chromium/TestJni', natives, [], [], jni_params, test_options)
test_options)
self.AssertGoldenTextEquals(h.GetContent()) self.AssertGoldenTextEquals(h.GetContent())
def testMainDexAnnotation(self): def testMainDexAnnotation(self):
@ -1261,7 +1210,8 @@ class Foo {
} }
} }
""" """
options = JniGeneratorOptions() options = TestOptions()
options.native_exports_optional = False
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/example/jni_generator/SampleForTests', options) test_data, 'org/chromium/example/jni_generator/SampleForTests', options)
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
@ -1279,7 +1229,7 @@ class Foo {
def willRaise(): def willRaise():
jni_generator.JNIFromJavaSource(test_data, jni_generator.JNIFromJavaSource(test_data,
'org/chromium/media/VideoCaptureFactory', 'org/chromium/media/VideoCaptureFactory',
JniGeneratorOptions()) TestOptions())
self.assertRaises(SyntaxError, willRaise) self.assertRaises(SyntaxError, willRaise)
@ -1299,7 +1249,7 @@ class Foo {
""" """
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testMultipleJNIAdditionalImport(self): def testMultipleJNIAdditionalImport(self):
@ -1320,7 +1270,7 @@ class Foo {
""" """
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testTracing(self): def testTracing(self):
@ -1341,7 +1291,7 @@ class Foo {
static native void nativeStaticMethod(); static native void nativeStaticMethod();
} }
""" """
options_with_tracing = JniGeneratorOptions() options_with_tracing = TestOptions()
options_with_tracing.enable_tracing = True options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing) test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1364,11 +1314,11 @@ class Foo {
jni_from_java = jni_generator.JNIFromJavaSource(test_data, jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo', 'org/chromium/foo/Foo',
JniGeneratorOptions()) TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent()) self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testSplitNameExample(self): def testSplitNameExample(self):
opts = JniGeneratorOptions() opts = TestOptions()
opts.split_name = "sample" opts.split_name = "sample"
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'), os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'),
@ -1377,58 +1327,19 @@ class Foo {
generated_text, golden_file='SampleForTestsWithSplit_jni.golden') generated_text, golden_file='SampleForTestsWithSplit_jni.golden')
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class ProxyTestGenerator(BaseTest): class ProxyTestGenerator(BaseTest):
def _BuildRegDictFromSample(self): def _BuildRegDictFromSample(self, options=None):
if options is None:
options = TestOptions()
path = self._JoinScriptDir( path = self._JoinScriptDir(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')) os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'))
reg_dict = jni_registration_generator._DictForPath( reg_dict = jni_registration_generator._DictForPath(path)
JniRegistrationGeneratorOptions(), path)
reg_dict = self._MergeRegistrationForTests([reg_dict]) reg_dict = self._MergeRegistrationForTests([reg_dict])
return reg_dict return reg_dict
def testEndToEndProxyHashed(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testEndToEndManualRegistration(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.manual_jni_registration = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'SampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(
input_java_files,
options,
name_to_goldens,
header_golden='SampleForAnnotationProcessorManualJni.golden')
def testEndToEndProxyJniWithModules(self):
input_java_files = [
'SampleForAnnotationProcessor.java', 'SampleModule.java'
]
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden',
'org/chromium/base/natives/module_GEN_JNI.java': 'ModuleGenJni.golden',
'J/module_N.java': 'ModuleJN.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testProxyNativesWithNatives(self): def testProxyNativesWithNatives(self):
test_data = """ test_data = """
package org.chromium.foo; package org.chromium.foo;
@ -1451,7 +1362,7 @@ class ProxyTestGenerator(BaseTest):
} }
""" """
options_with_tracing = JniGeneratorOptions() options_with_tracing = TestOptions()
options_with_tracing.enable_tracing = True options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource( jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing) test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1469,7 +1380,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
golden_natives = [ golden_natives = [
@ -1505,7 +1416,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', True) qualified_clazz, test_data, 'long', True)
golden_natives = [ golden_natives = [
@ -1541,7 +1452,7 @@ class ProxyTestGenerator(BaseTest):
""" """
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', False) qualified_clazz, test_data, 'long', False)
self.AssertListEquals(_RemoveHashedNames(natives), []) self.AssertListEquals(_RemoveHashedNames(natives), [])
@ -1570,10 +1481,9 @@ class ProxyTestGenerator(BaseTest):
} }
""" """
qualified_clazz = 'test/foo/Foo' qualified_clazz = 'test/foo/Foo'
options = JniRegistrationGeneratorOptions() jni_params = TestOptions()
options.manual_jni_registration = True
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
golden_natives = [ golden_natives = [
@ -1590,33 +1500,42 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(_RemoveHashedNames(natives), golden_natives) self.AssertListEquals(_RemoveHashedNames(natives), golden_natives)
jni_params = jni_generator.JniParams(qualified_clazz) jni_params = jni_generator.JniParams(qualified_clazz)
main_dex_header = jni_registration_generator.DictionaryGenerator( main_dex_header = jni_registration_generator.HeaderGenerator(
options, '', '', qualified_clazz, natives, jni_params, '',
main_dex=True).Generate() '',
qualified_clazz,
natives,
jni_params,
main_dex=True,
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header]) content = TestGenerator._MergeRegistrationForTests([main_dex_header])
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(options, '', content)) jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True))
other_qualified_clazz = 'test/foo/Bar' other_qualified_clazz = 'test/foo/Bar'
other_natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( other_natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
other_qualified_clazz, non_main_dex_test_data, 'long') other_qualified_clazz, non_main_dex_test_data, 'long')
jni_params = jni_generator.JniParams(other_qualified_clazz) jni_params = jni_generator.JniParams(other_qualified_clazz)
non_main_dex_header = jni_registration_generator.DictionaryGenerator( non_main_dex_header = jni_registration_generator.HeaderGenerator(
options,
'', '',
'', '',
other_qualified_clazz, other_qualified_clazz,
other_natives, other_natives,
jni_params, jni_params,
main_dex=False).Generate() main_dex=False,
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header] + content = TestGenerator._MergeRegistrationForTests([main_dex_header] +
[non_main_dex_header]) [non_main_dex_header])
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(options, '', content), jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True),
'AndNonMainDex') 'AndNonMainDex')
def testProxyNatives(self): def testProxyNatives(self):
@ -1656,9 +1575,9 @@ class ProxyTestGenerator(BaseTest):
qualified_clazz = 'org/chromium/example/SampleProxyJni' qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long') qualified_clazz, test_data, 'long')
bad_spacing_natives, _ = jni_generator.ProxyHelpers \ bad_spacing_natives = jni_generator.ProxyHelpers \
.ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long') .ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long')
golden_natives = [ golden_natives = [
NativeMethod( NativeMethod(
@ -1697,32 +1616,34 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives)) self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
self.AssertListEquals(golden_natives, self.AssertListEquals(golden_natives,
_RemoveHashedNames(bad_spacing_natives)) _RemoveHashedNames(bad_spacing_natives))
options = JniGeneratorOptions()
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
jni_params = jni_generator.JniParams(qualified_clazz) jni_params = jni_generator.JniParams(qualified_clazz)
h1 = jni_generator.InlHeaderFileGenerator('', '', qualified_clazz, natives, h1 = jni_generator.InlHeaderFileGenerator('', qualified_clazz, natives, [],
[], [], jni_params, options) [], jni_params, TestOptions())
self.AssertGoldenTextEquals(h1.GetContent()) self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.DictionaryGenerator(reg_options, '', '', h2 = jni_registration_generator.HeaderGenerator('',
qualified_clazz, '',
natives, jni_params, qualified_clazz,
False) natives,
jni_params,
False,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()]) content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
proxy_opts = jni_registration_generator.ProxyOptions(
manual_jni_registration=True)
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict( jni_registration_generator.CreateProxyJavaFromDict(content, proxy_opts),
reg_options, '', content),
suffix='Java') suffix='Java')
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_options, '', content), content,
proxy_opts.use_hash,
manual_jni_registration=proxy_opts.manual_jni_registration),
suffix='Registrations') suffix='Registrations')
def testProxyHashedExample(self): def testProxyHashedExample(self):
opts = JniGeneratorOptions() opts = TestOptions()
opts.use_proxy_hash = True opts.use_proxy_hash = True
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
@ -1733,6 +1654,20 @@ class ProxyTestGenerator(BaseTest):
generated_text, generated_text,
golden_file='HashedSampleForAnnotationProcessor_jni.golden') golden_file='HashedSampleForAnnotationProcessor_jni.golden')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), use_proxy_hash=True)
reg_dict = self._MergeRegistrationForTests([reg_dict])
proxy_opts = jni_registration_generator.ProxyOptions(use_hash=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
golden_file='HashedSampleForAnnotationProcessorGenJni.golden')
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts, forwarding=True),
golden_file='HashedSampleForAnnotationProcessorGenJni.2.golden')
def testProxyJniExample(self): def testProxyJniExample(self):
generated_text = self._CreateJniHeaderFromFile( generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'), os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'),
@ -1741,20 +1676,21 @@ class ProxyTestGenerator(BaseTest):
generated_text, golden_file='SampleForAnnotationProcessor_jni.golden') generated_text, golden_file='SampleForAnnotationProcessor_jni.golden')
def testGenJniFlags(self): def testGenJniFlags(self):
options = JniRegistrationGeneratorOptions()
reg_dict = self._BuildRegDictFromSample() reg_dict = self._BuildRegDictFromSample()
proxy_options = jni_registration_generator.ProxyOptions()
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
options, '', reg_dict) reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'Disabled') self.AssertGoldenTextEquals(content, 'Disabled')
options.enable_proxy_mocks = True proxy_options = jni_registration_generator.ProxyOptions(enable_mocks=True)
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
options, '', reg_dict) reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'MocksEnabled') self.AssertGoldenTextEquals(content, 'MocksEnabled')
options.require_mocks = True proxy_options = jni_registration_generator.ProxyOptions(
enable_mocks=True, require_mocks=True)
content = jni_registration_generator.CreateProxyJavaFromDict( content = jni_registration_generator.CreateProxyJavaFromDict(
options, '', reg_dict) reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'MocksRequired') self.AssertGoldenTextEquals(content, 'MocksRequired')
def testProxyTypeInfoPreserved(self): def testProxyTypeInfoPreserved(self):
@ -1772,8 +1708,8 @@ class ProxyTestGenerator(BaseTest):
SomeJavaType[][] someObjects); SomeJavaType[][] someObjects);
} }
""" """
natives, _ = ProxyHelpers.ExtractStaticProxyNatives( natives = ProxyHelpers.ExtractStaticProxyNatives('org/chromium/foo/FooJni',
'org/chromium/foo/FooJni', test_data, 'long') test_data, 'long')
golden_natives = [ golden_natives = [
NativeMethod( NativeMethod(
static=True, static=True,
@ -1822,53 +1758,63 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives)) self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class MultiplexTestGenerator(BaseTest): class MultiplexTestGenerator(BaseTest):
options = JniRegistrationGeneratorOptions()
options.enable_jni_multiplexing = True
def testProxyMultiplexGenJni(self): def testProxyMultiplexGenJni(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath( reg_dict = jni_registration_generator._DictForPath(
self.options, self._JoinScriptDir(path)) self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict], reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True) enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict( jni_registration_generator.CreateProxyJavaFromDict(
self.options, '', reg_dict), reg_dict, proxy_opts),
golden_file='testProxyMultiplexGenJni.golden') golden_file='testProxyMultiplexGenJni.golden')
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(self.options, jni_registration_generator.CreateProxyJavaFromDict(reg_dict,
'', proxy_opts,
reg_dict,
forwarding=True), forwarding=True),
golden_file='testProxyMultiplexGenJni.2.golden') golden_file='testProxyMultiplexGenJni.2.golden')
def testProxyMultiplexNatives(self): def testProxyMultiplexNatives(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath( reg_dict = jni_registration_generator._DictForPath(
self.options, self._JoinScriptDir(path)) self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict], reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True) enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict( self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
self.options, '', reg_dict), reg_dict,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing),
golden_file='testProxyMultiplexNatives.golden') golden_file='testProxyMultiplexNatives.golden')
def testProxyMultiplexNativesRegistration(self): def testProxyMultiplexNativesRegistration(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java') path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict_for_registration = jni_registration_generator._DictForPath( reg_dict_for_registration = jni_registration_generator._DictForPath(
self.options, self._JoinScriptDir(path)) self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict_for_registration = self._MergeRegistrationForTests( reg_dict_for_registration = self._MergeRegistrationForTests(
[reg_dict_for_registration], enable_jni_multiplexing=True) [reg_dict_for_registration], enable_jni_multiplexing=True)
new_options = copy.copy(self.options) proxy_opts = jni_registration_generator.ProxyOptions(
new_options.manual_jni_registration = True enable_jni_multiplexing=True)
self.AssertGoldenTextEquals( self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(new_options, '', jni_registration_generator.CreateFromDict(
reg_dict_for_registration), reg_dict_for_registration,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=True),
golden_file='testProxyMultiplexNativesRegistration.golden') golden_file='testProxyMultiplexNativesRegistration.golden')

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3 #!/usr/bin/env python
# Copyright 2017 The Chromium Authors # Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
@ -37,7 +37,12 @@ MERGEABLE_KEYS = [
] ]
def _Generate(options, java_file_paths): def _Generate(java_file_paths,
srcjar_path,
proxy_opts,
header_path=None,
namespace='',
include_test_only=True):
"""Generates files required to perform JNI registration. """Generates files required to perform JNI registration.
Generates a srcjar containing a single class, GEN_JNI, that contains all Generates a srcjar containing a single class, GEN_JNI, that contains all
@ -48,92 +53,92 @@ def _Generate(options, java_file_paths):
JNI registration. JNI registration.
Args: Args:
options: arguments from the command line
java_file_paths: A list of java file paths. java_file_paths: A list of java file paths.
srcjar_path: Path to the GEN_JNI srcjar.
header_path: If specified, generates a header file in this location.
namespace: If specified, sets the namespace for the generated header file.
""" """
# Without multiprocessing, script takes ~13 seconds for chrome_public_apk # Without multiprocessing, script takes ~13 seconds for chrome_public_apk
# on a z620. With multiprocessing, takes ~2 seconds. # on a z620. With multiprocessing, takes ~2 seconds.
results = collections.defaultdict(list) results = []
with multiprocessing.Pool() as pool: with multiprocessing.Pool() as pool:
for d in pool.imap_unordered(functools.partial(_DictForPath, options), for d in pool.imap_unordered(
java_file_paths): functools.partial(
_DictForPath,
use_proxy_hash=proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
namespace=namespace,
include_test_only=include_test_only), java_file_paths):
if d: if d:
results[d['MODULE_NAME']].append(d) results.append(d)
combined_dicts = collections.defaultdict(dict) # Sort to make output deterministic.
for module_name, module_results in results.items(): results.sort(key=lambda d: d['FULL_CLASS_NAME'])
# Sort to make output deterministic.
module_results.sort(key=lambda d: d['FULL_CLASS_NAME'])
combined_dict = combined_dicts[module_name]
for key in MERGEABLE_KEYS:
combined_dict[key] = ''.join(d.get(key, '') for d in module_results)
# PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have combined_dict = {}
# duplicates for JNI multiplexing since all native methods with similar for key in MERGEABLE_KEYS:
# signatures map to the same proxy. Similarly, there may be multiple switch combined_dict[key] = ''.join(d.get(key, '') for d in results)
# case entries for the same proxy signatures. # PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have
if options.enable_jni_multiplexing: # duplicates for JNI multiplexing since all native methods with similar
proxy_signatures_list = sorted( # signatures map to the same proxy. Similarly, there may be multiple switch
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n'))) # case entries for the same proxy signatures.
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join( if proxy_opts.enable_jni_multiplexing:
signature for signature in proxy_signatures_list) proxy_signatures_list = sorted(
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
signature for signature in proxy_signatures_list)
proxy_native_array_list = sorted( proxy_native_array_list = sorted(
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split( set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split('},\n')))
'},\n'))) combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join(
combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join( p for p in proxy_native_array_list if p != '') + '}'
p for p in proxy_native_array_list if p != '') + '}'
signature_to_cases = collections.defaultdict(list) signature_to_cases = collections.defaultdict(list)
for d in module_results: for d in results:
for signature, cases in d['SIGNATURE_TO_CASES'].items(): for signature, cases in d['SIGNATURE_TO_CASES'].items():
signature_to_cases[signature].extend(cases) signature_to_cases[signature].extend(cases)
combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls( combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls(
signature_to_cases, module_name) signature_to_cases, namespace)
if options.header_path:
assert len(
combined_dicts) == 1, 'Cannot output a header for multiple modules'
module_name = next(iter(combined_dicts))
combined_dict = combined_dicts[module_name]
if header_path:
combined_dict['HEADER_GUARD'] = \ combined_dict['HEADER_GUARD'] = \
os.path.splitext(options.header_path)[0].replace('/', '_').replace('.', '_').upper() + '_' os.path.splitext(header_path)[0].replace('/', '_').upper() + '_'
combined_dict['NAMESPACE'] = options.namespace combined_dict['NAMESPACE'] = namespace
header_content = CreateFromDict(options, module_name, combined_dict) header_content = CreateFromDict(
with build_utils.AtomicOutput(options.header_path, mode='w') as f: combined_dict,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=proxy_opts.manual_jni_registration)
with build_utils.AtomicOutput(header_path, mode='w') as f:
f.write(header_content) f.write(header_content)
with build_utils.AtomicOutput(options.srcjar_path) as f: with build_utils.AtomicOutput(srcjar_path) as f:
with zipfile.ZipFile(f, 'w') as srcjar: with zipfile.ZipFile(f, 'w') as srcjar:
for module_name, combined_dict in combined_dicts.items(): if proxy_opts.use_hash or proxy_opts.enable_jni_multiplexing:
# J/N.java
if options.use_proxy_hash or options.enable_jni_multiplexing: build_utils.AddToZipHermetic(
# J/N.java srcjar,
build_utils.AddToZipHermetic( '%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(True),
srcjar, data=CreateProxyJavaFromDict(combined_dict, proxy_opts))
'%s.java' % # org/chromium/base/natives/GEN_JNI.java
jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name), build_utils.AddToZipHermetic(
data=CreateProxyJavaFromDict(options, module_name, combined_dict)) srcjar,
# org/chromium/base/natives/GEN_JNI.java '%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False),
build_utils.AddToZipHermetic( data=CreateProxyJavaFromDict(
srcjar, combined_dict, proxy_opts, forwarding=True))
'%s.java' % else:
jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name), # org/chromium/base/natives/GEN_JNI.java
data=CreateProxyJavaFromDict(options, build_utils.AddToZipHermetic(
module_name, srcjar,
combined_dict, '%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False),
forwarding=True)) data=CreateProxyJavaFromDict(combined_dict, proxy_opts))
else:
# org/chromium/base/natives/GEN_JNI.java
build_utils.AddToZipHermetic(
srcjar,
'%s.java' %
jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name),
data=CreateProxyJavaFromDict(options, module_name, combined_dict))
def _DictForPath(options, path): def _DictForPath(path,
use_proxy_hash=False,
enable_jni_multiplexing=False,
namespace='',
include_test_only=True):
with open(path) as f: with open(path) as f:
contents = jni_generator.RemoveComments(f.read()) contents = jni_generator.RemoveComments(f.read())
if '@JniIgnoreNatives' in contents: if '@JniIgnoreNatives' in contents:
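
Condensing the left-hand `_Generate` above: per-file dicts are grouped by `MODULE_NAME`, and for each module the srcjar receives either a real `J/<module_>N` class plus a forwarding `<module_>GEN_JNI` (hashed or multiplexed builds) or just `<module_>GEN_JNI`. The sketch below shows only that decision; the dict contents and the `plan_srcjar` name are illustrative, merging of the MERGEABLE_KEYS is omitted, and the paths stand in for what build_utils.AddToZipHermetic writes.

```python
# Reduced sketch of the per-module grouping and srcjar layout in the
# left-hand _Generate above.
import collections

def plan_srcjar(per_file_dicts, use_proxy_hash, enable_jni_multiplexing):
  grouped = collections.defaultdict(list)
  for d in per_file_dicts:
    grouped[d['MODULE_NAME']].append(d)

  entries = []
  for module_name in sorted(grouped):
    prefix = module_name + '_' if module_name else ''
    if use_proxy_hash or enable_jni_multiplexing:
      # Hashed / multiplexed: real natives live in J/<prefix>N, plus a
      # forwarding <prefix>GEN_JNI for callers.
      entries += ['J/%sN.java' % prefix,
                  'org/chromium/base/natives/%sGEN_JNI.java' % prefix]
    else:
      entries += ['org/chromium/base/natives/%sGEN_JNI.java' % prefix]
  return entries

print(plan_srcjar([{'MODULE_NAME': ''}, {'MODULE_NAME': 'module'}],
                  use_proxy_hash=True, enable_jni_multiplexing=False))
# ['J/N.java', 'org/chromium/base/natives/GEN_JNI.java',
#  'J/module_N.java', 'org/chromium/base/natives/module_GEN_JNI.java']
```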
@ -141,14 +146,13 @@ def _DictForPath(options, path):
fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName( fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName(
path, contents) path, contents)
natives = jni_generator.ExtractNatives(contents, 'long')
natives, module_name = jni_generator.ProxyHelpers.ExtractStaticProxyNatives( natives += jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
fully_qualified_class=fully_qualified_class, fully_qualified_class=fully_qualified_class,
contents=contents, contents=contents,
ptr_type='long', ptr_type='long',
include_test_only=options.include_test_only) include_test_only=include_test_only)
natives += jni_generator.ExtractNatives(contents, 'long')
if len(natives) == 0: if len(natives) == 0:
return None return None
# The namespace for the content is separate from the namespace for the # The namespace for the content is separate from the namespace for the
@ -157,13 +161,19 @@ def _DictForPath(options, path):
jni_params = jni_generator.JniParams(fully_qualified_class) jni_params = jni_generator.JniParams(fully_qualified_class)
jni_params.ExtractImportsAndInnerClasses(contents) jni_params.ExtractImportsAndInnerClasses(contents)
is_main_dex = jni_generator.IsMainDexJavaClass(contents) is_main_dex = jni_generator.IsMainDexJavaClass(contents)
dict_generator = DictionaryGenerator(options, module_name, content_namespace, header_generator = HeaderGenerator(
fully_qualified_class, natives, namespace,
jni_params, is_main_dex) content_namespace,
return dict_generator.Generate() fully_qualified_class,
natives,
jni_params,
is_main_dex,
use_proxy_hash,
enable_jni_multiplexing=enable_jni_multiplexing)
return header_generator.Generate()
def _AddForwardingCalls(signature_to_cases, module_name): def _AddForwardingCalls(signature_to_cases, namespace):
template = string.Template(""" template = string.Template("""
JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}( JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}(
JNIEnv* env, JNIEnv* env,
@ -189,8 +199,7 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
jni_generator.JavaDataTypeToC(return_type), jni_generator.JavaDataTypeToC(return_type),
'CLASS_NAME': 'CLASS_NAME':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(True, jni_generator.ProxyHelpers.GetQualifiedClass(True) + namespace),
module_name)),
'PROXY_SIGNATURE': 'PROXY_SIGNATURE':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list)), _GetMultiplexProxyName(return_type, params_list)),
@ -205,7 +214,9 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
return ''.join(s for s in switch_statements) return ''.join(s for s in switch_statements)
def _SetProxyRegistrationFields(options, module_name, registration_dict): def _SetProxyRegistrationFields(registration_dict, use_hash,
enable_jni_multiplexing,
manual_jni_registration):
registration_template = string.Template("""\ registration_template = string.Template("""\
static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = { static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = {
@ -268,20 +279,20 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
} // namespace ${NAMESPACE} } // namespace ${NAMESPACE}
""") """)
short_name = options.use_proxy_hash or options.enable_jni_multiplexing
sub_dict = { sub_dict = {
'ESCAPED_PROXY_CLASS': 'ESCAPED_PROXY_CLASS':
jni_generator.EscapeClassName( jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(short_name, jni_generator.ProxyHelpers.GetQualifiedClass(
module_name)), use_hash or enable_jni_multiplexing)),
'PROXY_CLASS': 'PROXY_CLASS':
jni_generator.ProxyHelpers.GetQualifiedClass(short_name, module_name), jni_generator.ProxyHelpers.GetQualifiedClass(use_hash
or enable_jni_multiplexing),
'KMETHODS': 'KMETHODS':
registration_dict['PROXY_NATIVE_METHOD_ARRAY'], registration_dict['PROXY_NATIVE_METHOD_ARRAY'],
'REGISTRATION_NAME': 'REGISTRATION_NAME':
jni_generator.GetRegistrationFunctionName( jni_generator.GetRegistrationFunctionName(
jni_generator.ProxyHelpers.GetQualifiedClass(short_name, jni_generator.ProxyHelpers.GetQualifiedClass(
module_name)), use_hash or enable_jni_multiplexing)),
} }
if registration_dict['PROXY_NATIVE_METHOD_ARRAY']: if registration_dict['PROXY_NATIVE_METHOD_ARRAY']:
@ -305,17 +316,14 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration
registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call
if options.manual_jni_registration: if manual_jni_registration:
registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute( registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute(
registration_dict) registration_dict)
else: else:
registration_dict['MANUAL_REGISTRATION'] = '' registration_dict['MANUAL_REGISTRATION'] = ''
def CreateProxyJavaFromDict(options, def CreateProxyJavaFromDict(registration_dict, proxy_opts, forwarding=False):
module_name,
registration_dict,
forwarding=False):
template = string.Template("""\ template = string.Template("""\
// Copyright 2018 The Chromium Authors // Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
@ -333,20 +341,19 @@ ${METHODS}
} }
""") """)
is_natives_class = not forwarding and (options.use_proxy_hash is_natives_class = not forwarding and (proxy_opts.use_hash
or options.enable_jni_multiplexing) or proxy_opts.enable_jni_multiplexing)
class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class, class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class)
module_name)
package = jni_generator.ProxyHelpers.GetPackage(is_natives_class) package = jni_generator.ProxyHelpers.GetPackage(is_natives_class)
if forwarding or not (options.use_proxy_hash if forwarding or not (proxy_opts.use_hash
or options.enable_jni_multiplexing): or proxy_opts.enable_jni_multiplexing):
fields = string.Template("""\ fields = string.Template("""\
public static final boolean TESTING_ENABLED = ${TESTING_ENABLED}; public static final boolean TESTING_ENABLED = ${TESTING_ENABLED};
public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK}; public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK};
""").substitute({ """).substitute({
'TESTING_ENABLED': str(options.enable_proxy_mocks).lower(), 'TESTING_ENABLED': str(proxy_opts.enable_mocks).lower(),
'REQUIRE_MOCK': str(options.require_mocks).lower(), 'REQUIRE_MOCK': str(proxy_opts.require_mocks).lower(),
}) })
else: else:
fields = '' fields = ''
@ -364,7 +371,10 @@ ${METHODS}
}) })
def CreateFromDict(options, module_name, registration_dict): def CreateFromDict(registration_dict,
use_hash,
enable_jni_multiplexing=False,
manual_jni_registration=False):
"""Returns the content of the header file.""" """Returns the content of the header file."""
template = string.Template("""\ template = string.Template("""\
@ -398,8 +408,9 @@ ${FORWARDING_CALLS}
${MANUAL_REGISTRATION} ${MANUAL_REGISTRATION}
#endif // ${HEADER_GUARD} #endif // ${HEADER_GUARD}
""") """)
_SetProxyRegistrationFields(options, module_name, registration_dict) _SetProxyRegistrationFields(registration_dict, use_hash,
if not options.enable_jni_multiplexing: enable_jni_multiplexing, manual_jni_registration)
if not enable_jni_multiplexing:
registration_dict['FORWARDING_CALLS'] = '' registration_dict['FORWARDING_CALLS'] = ''
if len(registration_dict['FORWARD_DECLARATIONS']) == 0: if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
return '' return ''
@ -425,13 +436,19 @@ def _GetJavaToNativeParamsList(params_list):
return 'jlong switch_num, ' + ', '.join(params_in_stub) return 'jlong switch_num, ' + ', '.join(params_in_stub)
class DictionaryGenerator(object): class HeaderGenerator(object):
"""Generates an inline header file for JNI registration.""" """Generates an inline header file for JNI registration."""
def __init__(self, options, module_name, content_namespace, def __init__(self,
fully_qualified_class, natives, jni_params, main_dex): namespace,
self.options = options content_namespace,
self.module_name = module_name fully_qualified_class,
natives,
jni_params,
main_dex,
use_proxy_hash,
enable_jni_multiplexing=False):
self.namespace = namespace
self.content_namespace = content_namespace self.content_namespace = content_namespace
self.natives = natives self.natives = natives
self.proxy_natives = [n for n in natives if n.is_proxy] self.proxy_natives = [n for n in natives if n.is_proxy]
@ -442,17 +459,15 @@ class DictionaryGenerator(object):
self.main_dex = main_dex self.main_dex = main_dex
self.helper = jni_generator.HeaderFileGeneratorHelper( self.helper = jni_generator.HeaderFileGeneratorHelper(
self.class_name, self.class_name,
self.module_name,
fully_qualified_class, fully_qualified_class,
options.use_proxy_hash, use_proxy_hash,
enable_jni_multiplexing=options.enable_jni_multiplexing) enable_jni_multiplexing=enable_jni_multiplexing)
self.use_proxy_hash = use_proxy_hash
self.enable_jni_multiplexing = enable_jni_multiplexing
self.registration_dict = None self.registration_dict = None
def Generate(self): def Generate(self):
self.registration_dict = { self.registration_dict = {'FULL_CLASS_NAME': self.fully_qualified_class}
'FULL_CLASS_NAME': self.fully_qualified_class,
'MODULE_NAME': self.module_name
}
self._AddClassPathDeclarations() self._AddClassPathDeclarations()
self._AddForwardDeclaration() self._AddForwardDeclaration()
self._AddJNINativeMethodsArrays() self._AddJNINativeMethodsArrays()
@ -461,16 +476,19 @@ class DictionaryGenerator(object):
self._AddRegisterNativesFunctions() self._AddRegisterNativesFunctions()
self.registration_dict['PROXY_NATIVE_SIGNATURES'] = (''.join( self.registration_dict['PROXY_NATIVE_SIGNATURES'] = (''.join(
_MakeProxySignature(self.options, native) _MakeProxySignature(
native,
self.use_proxy_hash,
enable_jni_multiplexing=self.enable_jni_multiplexing)
for native in self.proxy_natives)) for native in self.proxy_natives))
if self.enable_jni_multiplexing:
if self.options.enable_jni_multiplexing:
self._AssignSwitchNumberToNatives() self._AssignSwitchNumberToNatives()
self._AddCases() self._AddCases()
if self.options.use_proxy_hash or self.options.enable_jni_multiplexing: if self.use_proxy_hash or self.enable_jni_multiplexing:
self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join( self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join(
_MakeForwardingProxy(self.options, self.module_name, native) _MakeForwardingProxy(
native, enable_jni_multiplexing=self.enable_jni_multiplexing)
for native in self.proxy_natives)) for native in self.proxy_natives))
return self.registration_dict return self.registration_dict
@ -564,11 +582,10 @@ ${KMETHODS}
if native.is_proxy: if native.is_proxy:
# Literal name of the native method in the class that contains the actual # Literal name of the native method in the class that contains the actual
# native declaration. # native declaration.
if self.options.enable_jni_multiplexing: if self.enable_jni_multiplexing:
return_type, params_list = native.return_and_signature return_type, params_list = native.return_and_signature
class_name = jni_generator.EscapeClassName( class_name = jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass( jni_generator.ProxyHelpers.GetQualifiedClass(True) + self.namespace)
True, self.module_name))
proxy_signature = jni_generator.EscapeClassName( proxy_signature = jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list)) _GetMultiplexProxyName(return_type, params_list))
@ -577,7 +594,7 @@ ${KMETHODS}
[jni_generator.Param(datatype='long', name='switch_num')] + [jni_generator.Param(datatype='long', name='switch_num')] +
native.params, native.return_type) native.params, native.return_type)
stub_name = 'Java_' + class_name + '_' + proxy_signature stub_name = 'Java_' + class_name + '_' + proxy_signature
elif self.options.use_proxy_hash: elif self.use_proxy_hash:
name = native.hashed_proxy_name name = native.hashed_proxy_name
else: else:
name = native.proxy_name name = native.proxy_name
@ -591,7 +608,7 @@ ${KMETHODS}
def _AddProxyNativeMethodKStrings(self): def _AddProxyNativeMethodKStrings(self):
"""Returns KMethodString for wrapped native methods in all_classes """ """Returns KMethodString for wrapped native methods in all_classes """
if self.main_dex or self.options.enable_jni_multiplexing: if self.main_dex or self.enable_jni_multiplexing:
key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX' key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'
else: else:
key = 'PROXY_NATIVE_METHOD_ARRAY' key = 'PROXY_NATIVE_METHOD_ARRAY'
@ -601,7 +618,7 @@ ${KMETHODS}
self._SetDictValue(key, proxy_k_strings) self._SetDictValue(key, proxy_k_strings)
def _SubstituteNativeMethods(self, template): def _SubstituteNativeMethods(self, template, sub_proxy=False):
"""Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided """Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided
template.""" template."""
ret = [] ret = []
@ -609,10 +626,10 @@ ${KMETHODS}
all_classes[self.class_name] = self.fully_qualified_class all_classes[self.class_name] = self.fully_qualified_class
for clazz, full_clazz in all_classes.items(): for clazz, full_clazz in all_classes.items():
if clazz == jni_generator.ProxyHelpers.GetClass( if not sub_proxy:
self.options.use_proxy_hash or self.options.enable_jni_multiplexing, if clazz == jni_generator.ProxyHelpers.GetClass(
self.module_name): self.use_proxy_hash or self.enable_jni_multiplexing):
continue continue
kmethods = self._GetKMethodsString(clazz) kmethods = self._GetKMethodsString(clazz)
namespace_str = '' namespace_str = ''
@ -706,8 +723,6 @@ ${NATIVES}\
params = _GetParamsListForMultiplex(signature[1], with_types=False) params = _GetParamsListForMultiplex(signature[1], with_types=False)
values = { values = {
'SWITCH_NUM': native.switch_num, 'SWITCH_NUM': native.switch_num,
# We are forced to call the generated stub instead of the impl because
# the impl is not guaranteed to have a globally unique name.
'STUB_NAME': self.helper.GetStubName(native), 'STUB_NAME': self.helper.GetStubName(native),
'PARAMS': params, 'PARAMS': params,
} }
@ -763,7 +778,7 @@ def _GetMultiplexProxyName(return_type, params_list):
return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params
def _MakeForwardingProxy(options, module_name, proxy_native): def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
template = string.Template(""" template = string.Template("""
public static ${RETURN_TYPE} ${METHOD_NAME}(${PARAMS_WITH_TYPES}) { public static ${RETURN_TYPE} ${METHOD_NAME}(${PARAMS_WITH_TYPES}) {
${MAYBE_RETURN}${PROXY_CLASS}.${PROXY_METHOD_NAME}(${PARAM_NAMES}); ${MAYBE_RETURN}${PROXY_CLASS}.${PROXY_METHOD_NAME}(${PARAM_NAMES});
@ -772,9 +787,9 @@ def _MakeForwardingProxy(options, module_name, proxy_native):
params_with_types = ', '.join( params_with_types = ', '.join(
'%s %s' % (p.datatype, p.name) for p in proxy_native.params) '%s %s' % (p.datatype, p.name) for p in proxy_native.params)
param_names = ', '.join(p.name for p in proxy_native.params) param_names = ', '.join(p.name for p in proxy_native.params)
proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name) proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True)
if options.enable_jni_multiplexing: if enable_jni_multiplexing:
if not param_names: if not param_names:
param_names = proxy_native.switch_num + 'L' param_names = proxy_native.switch_num + 'L'
else: else:
@ -802,13 +817,15 @@ def _MakeForwardingProxy(options, module_name, proxy_native):
}) })
def _MakeProxySignature(options, proxy_native): def _MakeProxySignature(proxy_native,
use_proxy_hash,
enable_jni_multiplexing=False):
params_with_types = ', '.join('%s %s' % (p.datatype, p.name) params_with_types = ', '.join('%s %s' % (p.datatype, p.name)
for p in proxy_native.params) for p in proxy_native.params)
native_method_line = """ native_method_line = """
public static native ${RETURN} ${PROXY_NAME}(${PARAMS_WITH_TYPES});""" public static native ${RETURN} ${PROXY_NAME}(${PARAMS_WITH_TYPES});"""
if options.enable_jni_multiplexing: if enable_jni_multiplexing:
# This has to be only one line and without comments because all the proxy # This has to be only one line and without comments because all the proxy
# signatures will be joined, then split on new lines with duplicates removed # signatures will be joined, then split on new lines with duplicates removed
# since multiple |proxy_native|s map to the same multiplexed signature. # since multiple |proxy_native|s map to the same multiplexed signature.
@ -819,7 +836,7 @@ def _MakeProxySignature(options, proxy_native):
proxy_name = _GetMultiplexProxyName(return_type, params_list) proxy_name = _GetMultiplexProxyName(return_type, params_list)
params_with_types = 'long switch_num' + _GetParamsListForMultiplex( params_with_types = 'long switch_num' + _GetParamsListForMultiplex(
params_list, with_types=True) params_list, with_types=True)
elif options.use_proxy_hash: elif use_proxy_hash:
signature_template = string.Template(""" signature_template = string.Template("""
// Original name: ${ALT_NAME}""" + native_method_line) // Original name: ${ALT_NAME}""" + native_method_line)
@ -842,6 +859,18 @@ def _MakeProxySignature(options, proxy_native):
}) })
class ProxyOptions:
def __init__(self, **kwargs):
self.use_hash = kwargs.get('use_hash', False)
self.enable_jni_multiplexing = kwargs.get('enable_jni_multiplexing', False)
self.manual_jni_registration = kwargs.get('manual_jni_registration', False)
self.enable_mocks = kwargs.get('enable_mocks', False)
self.require_mocks = kwargs.get('require_mocks', False)
# Can never require and disable.
assert self.enable_mocks or not self.require_mocks
def main(argv): def main(argv):
arg_parser = argparse.ArgumentParser() arg_parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(arg_parser) build_utils.AddDepfileOption(arg_parser)
@ -859,56 +888,64 @@ def main(argv):
required=True, required=True,
help='Path to output srcjar for GEN_JNI.java (and J/N.java if proxy' help='Path to output srcjar for GEN_JNI.java (and J/N.java if proxy'
' hash is enabled).') ' hash is enabled).')
arg_parser.add_argument('--file-exclusions', arg_parser.add_argument(
default=[], '--sources-exclusions',
help='A list of Java files which should be ignored ' default=[],
'by the parser.') help='A list of Java files which should be ignored '
'by the parser.')
arg_parser.add_argument( arg_parser.add_argument(
'--namespace', '--namespace',
default='', default='',
help='Native namespace to wrap the registration functions ' help='Namespace to wrap the registration functions '
'into.') 'into.')
# TODO(crbug.com/898261) hook these flags up to the build config to enable # TODO(crbug.com/898261) hook these flags up to the build config to enable
# mocking in instrumentation tests # mocking in instrumentation tests
arg_parser.add_argument( arg_parser.add_argument(
'--enable-proxy-mocks', '--enable_proxy_mocks',
default=False, default=False,
action='store_true', action='store_true',
help='Allows proxy native impls to be mocked through Java.') help='Allows proxy native impls to be mocked through Java.')
arg_parser.add_argument( arg_parser.add_argument(
'--require-mocks', '--require_mocks',
default=False, default=False,
action='store_true', action='store_true',
help='Requires all used native implementations to have a mock set when ' help='Requires all used native implementations to have a mock set when '
'called. Otherwise an exception will be thrown.') 'called. Otherwise an exception will be thrown.')
arg_parser.add_argument( arg_parser.add_argument(
'--use-proxy-hash', '--use_proxy_hash',
action='store_true', action='store_true',
help='Enables hashing of the native declaration for methods in ' help='Enables hashing of the native declaration for methods in '
'an @JniNatives interface') 'an @JniNatives interface')
arg_parser.add_argument( arg_parser.add_argument(
'--enable-jni-multiplexing', '--enable_jni_multiplexing',
action='store_true', action='store_true',
help='Enables JNI multiplexing for Java native methods') help='Enables JNI multiplexing for Java native methods')
arg_parser.add_argument( arg_parser.add_argument(
'--manual-jni-registration', '--manual_jni_registration',
action='store_true', action='store_true',
help='Manually do JNI registration - required for crazy linker') help='Manually do JNI registration - required for crazy linker')
arg_parser.add_argument('--include-test-only', arg_parser.add_argument('--include_test_only',
action='store_true', action='store_true',
help='Whether to maintain ForTesting JNI methods.') help='Whether to maintain ForTesting JNI methods.')
args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:])) args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))
if not args.enable_proxy_mocks and args.require_mocks: if not args.enable_proxy_mocks and args.require_mocks:
arg_parser.error( arg_parser.error(
'Invalid arguments: --require-mocks without --enable-proxy-mocks. ' 'Invalid arguments: --require_mocks without --enable_proxy_mocks. '
'Cannot require mocks if they are not enabled.') 'Cannot require mocks if they are not enabled.')
if not args.header_path and args.manual_jni_registration: if not args.header_path and args.manual_jni_registration:
arg_parser.error( arg_parser.error(
'Invalid arguments: --manual-jni-registration without --header-path. ' 'Invalid arguments: --manual_jni_registration without --header-path. '
'Cannot manually register JNI if there is no output header file.') 'Cannot manually register JNI if there is no output header file.')
sources_files = sorted(set(build_utils.ParseGnList(args.sources_files))) sources_files = sorted(set(build_utils.ParseGnList(args.sources_files)))
proxy_opts = ProxyOptions(
use_hash=args.use_proxy_hash,
enable_jni_multiplexing=args.enable_jni_multiplexing,
manual_jni_registration=args.manual_jni_registration,
require_mocks=args.require_mocks,
enable_mocks=args.enable_proxy_mocks)
java_file_paths = [] java_file_paths = []
for f in sources_files: for f in sources_files:
@ -916,8 +953,13 @@ def main(argv):
# skip Kotlin files as they are not supported by JNI generation. # skip Kotlin files as they are not supported by JNI generation.
java_file_paths.extend( java_file_paths.extend(
p for p in build_utils.ReadSourcesList(f) if p.startswith('..') p for p in build_utils.ReadSourcesList(f) if p.startswith('..')
and p not in args.file_exclusions and not p.endswith('.kt')) and p not in args.sources_exclusions and not p.endswith('.kt'))
_Generate(args, java_file_paths) _Generate(java_file_paths,
args.srcjar_path,
proxy_opts=proxy_opts,
header_path=args.header_path,
namespace=args.namespace,
include_test_only=args.include_test_only)
if args.depfile: if args.depfile:
build_utils.WriteDepfile(args.depfile, args.srcjar_path, build_utils.WriteDepfile(args.depfile, args.srcjar_path,

View File

@ -3,7 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/android/jni_generator/jni_registration_generated.h" #include "base/android/jni_generator/sample_jni_apk__final_jni_generated.h"
#include "base/android/jni_utils.h" #include "base/android/jni_utils.h"
// This is called by the VM when the shared library is first loaded. // This is called by the VM when the shared library is first loaded.

View File

@ -1,105 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/meminfo_dump_provider.h"
#include <jni.h>
#include "base/android/jni_android.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/base_jni_headers/MemoryInfoBridge_jni.h"
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base::android {
MeminfoDumpProvider::MeminfoDumpProvider() {
#if BUILDFLAG(ENABLE_BASE_TRACING)
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, kDumpProviderName, nullptr);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
MeminfoDumpProvider& MeminfoDumpProvider::Initialize() {
static base::NoDestructor<MeminfoDumpProvider> instance;
return *instance.get();
}
bool MeminfoDumpProvider::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// This is best-effort, and will be wrong if there are other callers of
// ActivityManager#getProcessMemoryInfo(), either in this process or from
// another process which is allowed to do so (typically, adb).
//
// However, since the framework doesn't document throttling in any non-vague
// terms and the results are not timestamped, this is the best we can do. The
// delay and the rest of the assumptions here come from
// https://android.googlesource.com/platform/frameworks/base/+/refs/heads/android13-dev/services/core/java/com/android/server/am/ActivityManagerService.java#4093.
//
// We could always report the value on pre-Q devices, but that would skew
// reported data. Also, some OEMs may have cherry-picked the Q change, meaning
// that it's safer and more accurate to not report likely-stale data on all
// Android releases.
base::TimeTicks now = base::TimeTicks::Now();
bool stale_data = (now - last_collection_time_) < base::Minutes(5);
// Background data dumps (as in the BACKGROUND level of detail, not the
// application being in background) should not include stale data, since it
// would confuse data in UMA. In particular, the background/foreground session
// filter would no longer be accurate.
if (stale_data && args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
return true;
}
base::trace_event::MemoryAllocatorDump* dump =
pmd->CreateAllocatorDump(kDumpName);
// Data is either expected to be fresh, or this is a manually requested dump,
// and we should still report data, but note that it is stale.
dump->AddScalar(kIsStaleName, "bool", stale_data);
last_collection_time_ = now;
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> memory_info =
Java_MemoryInfoBridge_getActivityManagerMemoryInfoForSelf(env);
// Tell the manager that collection failed. Since this is likely not a
// transient failure, don't return an empty dump, and let the manager exclude
// this provider from the next dump.
if (memory_info.is_null()) {
LOG(WARNING) << "Got a null value";
return false;
}
ScopedJavaLocalRef<jclass> clazz{env, env->GetObjectClass(memory_info.obj())};
jfieldID other_private_dirty_id =
env->GetFieldID(clazz.obj(), "otherPrivateDirty", "I");
jfieldID other_pss_id = env->GetFieldID(clazz.obj(), "otherPss", "I");
int other_private_dirty_kb =
env->GetIntField(memory_info.obj(), other_private_dirty_id);
int other_pss_kb = env->GetIntField(memory_info.obj(), other_pss_id);
// What "other" covers is not documented in Debug#MemoryInfo, nor in
// ActivityManager#getProcessMemoryInfo. However, it calls
// Debug#getMemoryInfo(), which ends up summing all the heaps in the range
// [HEAP_DALVIK_OTHER, HEAP_OTHER_MEMTRACK]. See the definitions in
// https://android.googlesource.com/platform/frameworks/base/+/0b7c1774ba42daef7c80bf2f00fe1c0327e756ae/core/jni/android_os_Debug.cpp#60,
// and the code in android_os_Debug_getDirtyPagesPid() in the same file.
dump->AddScalar(kPrivateDirtyMetricName, "bytes",
static_cast<uint64_t>(other_private_dirty_kb) * 1024);
dump->AddScalar(kPssMetricName, "bytes",
static_cast<uint64_t>(other_pss_kb) * 1024);
return true;
#else // BUILDFLAG(ENABLE_BASE_TRACING)
return false;
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace base::android

View File

@ -1,38 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#define BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#include "base/base_export.h"
#include "base/no_destructor.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
namespace base::android {
class BASE_EXPORT MeminfoDumpProvider
: public base::trace_event::MemoryDumpProvider {
public:
// Returns the instance for testing.
static MeminfoDumpProvider& Initialize();
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
static constexpr char kDumpProviderName[] = "android_meminfo";
static constexpr char kDumpName[] = "meminfo";
static constexpr char kIsStaleName[] = "is_stale";
static constexpr char kPssMetricName[] = "other_pss";
static constexpr char kPrivateDirtyMetricName[] = "other_private_dirty";
private:
friend class base::NoDestructor<MeminfoDumpProvider>;
MeminfoDumpProvider();
base::TimeTicks last_collection_time_;
};
} // namespace base::android
#endif // BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_

View File

@ -4,7 +4,6 @@
#include "base/android/callback_android.h" #include "base/android/callback_android.h"
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h" #include "base/android/jni_string.h"
#include "base/base_jni_headers/NativeUmaRecorder_jni.h" #include "base/base_jni_headers/NativeUmaRecorder_jni.h"
#include "base/format_macros.h" #include "base/format_macros.h"
@ -265,36 +264,6 @@ jint JNI_NativeUmaRecorder_GetHistogramTotalCountForTesting(
return actual_count; return actual_count;
} }
// Returns an array with 3 entries for each bucket, representing (min, max,
// count).
ScopedJavaLocalRef<jlongArray>
JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(
JNIEnv* env,
const JavaParamRef<jstring>& histogram_name) {
std::string name = android::ConvertJavaStringToUTF8(env, histogram_name);
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
std::vector<int64_t> buckets;
if (histogram == nullptr) {
// No samples have been recorded for this histogram.
return base::android::ToJavaLongArray(env, buckets);
}
std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
for (auto sampleCountIterator = samples->Iterator();
!sampleCountIterator->Done(); sampleCountIterator->Next()) {
HistogramBase::Sample min;
int64_t max;
HistogramBase::Count count;
sampleCountIterator->Get(&min, &max, &count);
buckets.push_back(min);
buckets.push_back(max);
buckets.push_back(count);
}
return base::android::ToJavaLongArray(env, buckets);
}
jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) { jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) {
HistogramsSnapshot* snapshot = new HistogramsSnapshot(); HistogramsSnapshot* snapshot = new HistogramsSnapshot();
for (const auto* const histogram : StatisticsRecorder::GetHistograms()) { for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {

View File

@ -240,7 +240,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This can get very large as it constructs the whole data structure in // This can get very large as it constructs the whole data structure in
// memory before dumping it to the file. // memory before dumping it to the file.
Value root(Value::Type::DICT); Value root(Value::Type::DICTIONARY);
uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed); uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed);
root.SetStringKey("total_calls_count", root.SetStringKey("total_calls_count",
base::StringPrintf("%" PRIu32, total_calls_count)); base::StringPrintf("%" PRIu32, total_calls_count));
@ -252,7 +252,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This callee was never called. // This callee was never called.
continue; continue;
Value callee_element(Value::Type::DICT); Value callee_element(Value::Type::DICTIONARY);
uint32_t callee_offset = i * 4; uint32_t callee_offset = i * 4;
callee_element.SetStringKey("index", callee_element.SetStringKey("index",
base::StringPrintf("%" PRIuS, caller_index)); base::StringPrintf("%" PRIuS, caller_index));
@ -278,7 +278,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// No misses. // No misses.
continue; continue;
Value caller_count(Value::Type::DICT); Value caller_count(Value::Type::DICTIONARY);
caller_count.SetStringKey("caller_offset", caller_count.SetStringKey("caller_offset",
base::StringPrintf("%" PRIu32, caller_offset)); base::StringPrintf("%" PRIu32, caller_offset));
caller_count.SetStringKey("count", base::StringPrintf("%" PRIu32, count)); caller_count.SetStringKey("count", base::StringPrintf("%" PRIu32, count));

View File

@ -2,21 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
// IMPORTANT NOTE: deprecated. Use std::atomic instead.
//
// Rationale:
// - Uniformity: most of the code uses std::atomic, and the underlying
// implementation is the same. Use the STL one.
// - Clearer code: return values from some operations (e.g. CompareAndSwap)
// differ from the equivalent ones in std::atomic, leading to confusion.
// - Richer semantics: can use actual types, rather than e.g. Atomic32 for a
// boolean flag, or AtomicWord for T*. Bitwise operations (e.g. fetch_or())
// are only in std::atomic.
// - Harder to misuse: base::subtle::Atomic32 is just an int, making it possible
// to accidentally manipulate, not realizing that there are no atomic
// semantics attached to it. For instance, "Atomic32 a; a++;" is almost
// certainly incorrect.
// For atomic operations on reference counts, see atomic_refcount.h. // For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h. // For atomic operations on sequence numbers, see atomic_sequence_num.h.
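The rationale in the comment block above (present on only one side of this change) recommends std::atomic over base::subtle. As a rough illustration of the replacement pattern it describes, here is a minimal sketch using only standard &lt;atomic&gt; operations; the variable and function names are illustrative, not taken from the tree:

#include <atomic>

namespace {

std::atomic<bool> g_initialized{false};  // A real bool instead of an Atomic32 used as a flag.
std::atomic<int> g_call_count{0};

void RecordCall() {
  // fetch_add()/fetch_or() have no base::subtle equivalent.
  g_call_count.fetch_add(1, std::memory_order_relaxed);

  bool expected = false;
  // compare_exchange_strong() returns whether the swap happened, unlike
  // base::subtle::NoBarrier_CompareAndSwap(), which returns the previous value.
  if (g_initialized.compare_exchange_strong(expected, true)) {
    // First caller wins; one-time setup would go here.
  }
}

}  // namespace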

View File

@ -137,8 +137,9 @@ class BASE_EXPORT BigEndianWriter {
template<typename T> template<typename T>
bool Write(T v); bool Write(T v);
raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> ptr_; // TODO(crbug.com/1298696): Breaks net_unittests.
raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> end_; raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> ptr_;
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> end_;
}; };
} // namespace base } // namespace base

View File

@ -180,24 +180,10 @@ std::ostream& CheckError::stream() {
} }
CheckError::~CheckError() { CheckError::~CheckError() {
// TODO(crbug.com/1409729): Consider splitting out CHECK from DCHECK so that
// the destructor can be marked [[noreturn]] and we don't need to check
// severity in the destructor.
const bool is_fatal = log_message_->severity() == LOGGING_FATAL;
// Note: This function ends up in crash stack traces. If its full name // Note: This function ends up in crash stack traces. If its full name
// changes, the crash server's magic signature logic needs to be updated. // changes, the crash server's magic signature logic needs to be updated.
// See cl/306632920. // See cl/306632920.
delete log_message_; delete log_message_;
// Make sure we crash even if LOG(FATAL) has been overridden.
// TODO(crbug.com/1409729): Include Windows here too. This is done in steps to
// prevent backsliding on platforms where this goes through CQ.
// Windows is blocked by:
// * All/RenderProcessHostWriteableFileDeathTest.
// PassUnsafeWriteableExecutableFile/2
if (is_fatal && !BUILDFLAG(IS_WIN)) {
base::ImmediateCrash();
}
} }
NotReachedError NotReachedError::NotReached(const char* file, int line) { NotReachedError NotReachedError::NotReached(const char* file, int line) {
@ -212,47 +198,13 @@ NotReachedError NotReachedError::NotReached(const char* file, int line) {
} }
void NotReachedError::TriggerNotReached() { void NotReachedError::TriggerNotReached() {
// This triggers a NOTREACHED() error as the returned NotReachedError goes out // TODO(pbos): Add back NotReachedError("", -1) here asap. This was removed to
// of scope. // disable NOTREACHED() reports temporarily for M111 and should be added
NotReached("", -1); // back once this change has merged to M111.
} }
NotReachedError::~NotReachedError() = default; NotReachedError::~NotReachedError() = default;
NotReachedNoreturnError::NotReachedNoreturnError(const char* file, int line)
: CheckError([file, line]() {
auto* const log_message = new LogMessage(file, line, LOGGING_FATAL);
log_message->stream() << "NOTREACHED hit. ";
return log_message;
}()) {}
// Note: This function ends up in crash stack traces. If its full name changes,
// the crash server's magic signature logic needs to be updated. See
// cl/306632920.
NotReachedNoreturnError::~NotReachedNoreturnError() {
delete log_message_;
// Make sure we die if we haven't. LOG(FATAL) is not yet [[noreturn]] as of
// writing this.
base::ImmediateCrash();
}
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
const char* file,
int line,
const char* expr_str,
char* v1_str,
char* v2_str) {
LogMessage* const log_message =
is_dcheck ? new DCheckLogMessage(file, line, LOGGING_DCHECK)
: new LogMessage(file, line, LOGGING_FATAL);
log_message->stream() << "Check failed: " << expr_str << " (" << v1_str
<< " vs. " << v2_str << ")";
free(v1_str);
free(v2_str);
return log_message;
}
void RawCheck(const char* message) { void RawCheck(const char* message) {
RawLog(LOGGING_FATAL, message); RawLog(LOGGING_FATAL, message);
} }

View File

@ -93,7 +93,7 @@ class BASE_EXPORT CheckError {
return stream() << streamed_type; return stream() << streamed_type;
} }
protected: private:
LogMessage* const log_message_; LogMessage* const log_message_;
}; };
@ -113,15 +113,6 @@ class BASE_EXPORT NotReachedError : public CheckError {
using CheckError::CheckError; using CheckError::CheckError;
}; };
// TODO(crbug.com/851128): This should take the name of the above class once all
// callers of NOTREACHED() have migrated to the CHECK-fatal version.
class BASE_EXPORT NotReachedNoreturnError : public CheckError {
public:
NotReachedNoreturnError(const char* file, int line);
[[noreturn]] NOMERGE NOINLINE NOT_TAIL_CALLED ~NotReachedNoreturnError();
};
// The 'switch' is used to prevent the 'else' from being ambiguous when the // The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as: // macro is used in an 'if' clause such as:
// if (a == 1) // if (a == 1)

View File

@ -76,4 +76,19 @@ char* StreamValToStr(const void* v,
return strdup(ss.str().c_str()); return strdup(ss.str().c_str());
} }
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
const char* file,
int line,
const char* expr_str,
char* v1_str,
char* v2_str) {
LogMessage* const log_message =
new LogMessage(file, line, is_dcheck ? LOGGING_DCHECK : LOGGING_FATAL);
log_message->stream() << "Check failed: " << expr_str << " (" << v1_str
<< " vs. " << v2_str << ")";
free(v1_str);
free(v2_str);
return log_message;
}
} // namespace logging } // namespace logging

View File

@ -64,7 +64,7 @@
// folding of multiple identical caller functions into a single signature. To // folding of multiple identical caller functions into a single signature. To
// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h. // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
// Use like: // Use like:
// NOT_TAIL_CALLED void FooBar(); // void NOT_TAIL_CALLED FooBar();
#if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called) #if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)
#define NOT_TAIL_CALLED __attribute__((not_tail_called)) #define NOT_TAIL_CALLED __attribute__((not_tail_called))
#else #else

View File

@ -253,15 +253,6 @@ class EnumSet {
// Removes all values from our set. // Removes all values from our set.
void Clear() { enums_.reset(); } void Clear() { enums_.reset(); }
// Conditionally puts or removes `value`, based on `should_be_present`.
void PutOrRemove(E value, bool should_be_present) {
if (should_be_present) {
Put(value);
} else {
Remove(value);
}
}
// Returns true iff the given value is in range and a member of our set. // Returns true iff the given value is in range and a member of our set.
constexpr bool Has(E value) const { constexpr bool Has(E value) const {
return InRange(value) && enums_[ToIndex(value)]; return InRange(value) && enums_[ToIndex(value)];
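For reference, a minimal sketch of how the PutOrRemove() helper shown above (present on only one side of this change) is used at a call site; the enum, the set alias, and the function name are illustrative assumptions, not code from this diff:

#include "base/containers/enum_set.h"

enum class Fruit { kApple, kBanana, kOrange, kMaxValue = kOrange };
using FruitSet = base::EnumSet<Fruit, Fruit::kApple, Fruit::kMaxValue>;

void SyncBananaPreference(FruitSet& set, bool banana_enabled) {
  // Equivalent to: if (banana_enabled) set.Put(...); else set.Remove(...);
  set.PutOrRemove(Fruit::kBanana, banana_enabled);
}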

View File

@ -250,7 +250,7 @@ class small_map {
inline explicit iterator(const typename NormalMap::iterator& init) inline explicit iterator(const typename NormalMap::iterator& init)
: array_iter_(nullptr), map_iter_(init) {} : array_iter_(nullptr), map_iter_(init) {}
raw_ptr<value_type, AllowPtrArithmetic> array_iter_; raw_ptr<value_type> array_iter_;
typename NormalMap::iterator map_iter_; typename NormalMap::iterator map_iter_;
}; };
@ -327,7 +327,7 @@ class small_map {
const typename NormalMap::const_iterator& init) const typename NormalMap::const_iterator& init)
: array_iter_(nullptr), map_iter_(init) {} : array_iter_(nullptr), map_iter_(init) {}
raw_ptr<const value_type, AllowPtrArithmetic> array_iter_; raw_ptr<const value_type> array_iter_;
typename NormalMap::const_iterator map_iter_; typename NormalMap::const_iterator map_iter_;
}; };

View File

@ -18,7 +18,6 @@
#include "base/containers/checked_iterators.h" #include "base/containers/checked_iterators.h"
#include "base/containers/contiguous_iterator.h" #include "base/containers/contiguous_iterator.h"
#include "base/cxx20_to_address.h" #include "base/cxx20_to_address.h"
#include "base/numerics/safe_math.h"
namespace base { namespace base {
@ -257,16 +256,16 @@ class GSL_POINTER span : public internal::ExtentStorage<Extent> {
template <typename It, template <typename It,
typename = internal::EnableIfCompatibleContiguousIterator<It, T>> typename = internal::EnableIfCompatibleContiguousIterator<It, T>>
constexpr span(It first, StrictNumeric<size_t> count) noexcept constexpr span(It first, size_t count) noexcept
: ExtentStorage(count), : ExtentStorage(count),
// The use of to_address() here is to handle the case where the iterator // The use of to_address() here is to handle the case where the iterator
// `first` is pointing to the container's `end()`. In that case we can // `first` is pointing to the container's `end()`. In that case we can
// not use the address returned from the iterator, or dereference it // not use the address returned from the iterator, or dereference it
// through the iterator's `operator*`, but we can store it. We must // through the iterator's `operator*`, but we can store it. We must assume
// assume in this case that `count` is 0, since the iterator does not // in this case that `count` is 0, since the iterator does not point to
// point to valid data. Future hardening of iterators may disallow // valid data. Future hardening of iterators may disallow pulling the
// pulling the address from `end()`, as demonstrated by asserts() in // address from `end()`, as demonstrated by asserts() in libstdc++:
// libstdc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960. // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960.
// //
// The span API dictates that the `data()` is accessible when size is 0, // The span API dictates that the `data()` is accessible when size is 0,
// since the pointer may be valid, so we cannot prevent storing and // since the pointer may be valid, so we cannot prevent storing and
@ -474,7 +473,7 @@ as_writable_bytes(span<T, X> s) noexcept {
// Type-deducing helpers for constructing a span. // Type-deducing helpers for constructing a span.
template <int&... ExplicitArgumentBarrier, typename It> template <int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept { constexpr auto make_span(It it, size_t size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>; using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T>(it, size); return span<T>(it, size);
} }
@ -509,7 +508,7 @@ constexpr auto make_span(Container&& container) noexcept {
// //
// Usage: auto static_span = base::make_span<N>(...); // Usage: auto static_span = base::make_span<N>(...);
template <size_t N, int&... ExplicitArgumentBarrier, typename It> template <size_t N, int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept { constexpr auto make_span(It it, size_t size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>; using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T, N>(it, size); return span<T, N>(it, size);
} }
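As context for the iterator-plus-count constructor changed above (StrictNumeric&lt;size_t&gt; versus plain size_t for the count), a small usage sketch; the vector and variable names are illustrative:

#include <vector>
#include "base/containers/span.h"

void SpanFromIteratorAndCount() {
  std::vector<int> v = {1, 2, 3};
  // Spans the whole vector; the count must match what the iterator can reach.
  base::span<int> all = base::make_span(v.begin(), v.size());
  // Per the comment above, end() may only be paired with a count of 0: the
  // address is stored but never dereferenced.
  base::span<int> empty = base::make_span(v.end(), 0u);
  // all.size() == 3, empty.size() == 0.
}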

View File

@ -5,13 +5,27 @@
#ifndef BASE_CXX17_BACKPORTS_H_ #ifndef BASE_CXX17_BACKPORTS_H_
#define BASE_CXX17_BACKPORTS_H_ #define BASE_CXX17_BACKPORTS_H_
#include <algorithm> #include <functional>
#include "base/check.h"
namespace base { namespace base {
// TODO(crbug.com/1373621): Rewrite all uses of base::clamp as std::clamp and // C++14 implementation of C++17's std::clamp():
// remove this file. // https://en.cppreference.com/w/cpp/algorithm/clamp
using std::clamp; // Please note that the C++ spec makes it undefined behavior to call std::clamp
// with a value of `lo` that compares greater than the value of `hi`. This
// implementation uses a CHECK to enforce this as a hard restriction.
template <typename T, typename Compare>
constexpr const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) {
CHECK(!comp(hi, lo));
return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
}
template <typename T>
constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
return base::clamp(v, lo, hi, std::less<T>{});
}
} // namespace base } // namespace base
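A short usage sketch for base::clamp() as defined in either version of the header above (the values are illustrative); note the hard CHECK when the bounds are reversed:

#include "base/cxx17_backports.h"

void ClampExamples() {
  int a = base::clamp(15, 0, 10);  // 10: values above the range are pinned to hi.
  int b = base::clamp(-3, 0, 10);  // 0: values below the range are pinned to lo.
  int c = base::clamp(5, 0, 10);   // 5: in-range values pass through unchanged.
  // base::clamp(5, 10, 0) would CHECK-fail, since lo compares greater than hi.
}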

View File

@ -1,2 +1,5 @@
# For activity tracking:
per-file activity_*=bcwhite@chromium.org
# For ASan integration: # For ASan integration:
per-file asan_service*=file://base/memory/MIRACLE_PTR_OWNERS per-file asan_service*=file://base/memory/MIRACLE_PTR_OWNERS

View File

@ -0,0 +1,407 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/debug/activity_analyzer.h"
#include <utility>
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
namespace base {
namespace debug {
namespace {
const ActivityUserData::Snapshot& GetEmptyUserDataSnapshot() {
// An empty snapshot that can be returned when there otherwise is none.
static const NoDestructor<ActivityUserData::Snapshot> empty_snapshot;
return *empty_snapshot;
}
// DO NOT CHANGE VALUES. This is logged persistently in a histogram.
enum AnalyzerCreationError {
kInvalidMemoryMappedFile,
kPmaBadFile,
kPmaUninitialized,
kPmaDeleted,
kPmaCorrupt,
kAnalyzerCreationErrorMax // Keep this last.
};
void LogAnalyzerCreationError(AnalyzerCreationError error) {
UmaHistogramEnumeration("ActivityTracker.Collect.AnalyzerCreationError",
error, kAnalyzerCreationErrorMax);
}
} // namespace
ThreadActivityAnalyzer::Snapshot::Snapshot() = default;
ThreadActivityAnalyzer::Snapshot::~Snapshot() = default;
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
const ThreadActivityTracker& tracker)
: activity_snapshot_valid_(tracker.CreateSnapshot(&activity_snapshot_)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
: ThreadActivityAnalyzer(ThreadActivityTracker(base, size)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference)
: ThreadActivityAnalyzer(allocator->GetAsArray<char>(
reference,
GlobalActivityTracker::kTypeIdActivityTracker,
PersistentMemoryAllocator::kSizeAny),
allocator->GetAllocSize(reference)) {}
ThreadActivityAnalyzer::~ThreadActivityAnalyzer() = default;
void ThreadActivityAnalyzer::AddGlobalInformation(
GlobalActivityAnalyzer* global) {
if (!IsValid())
return;
// User-data is held at the global scope even though it's referenced at the
// thread scope.
activity_snapshot_.user_data_stack.clear();
for (auto& activity : activity_snapshot_.activity_stack) {
// The global GetUserDataSnapshot will return an empty snapshot if the ref
// or id is not valid.
activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
activity_snapshot_.process_id, activity.user_data_ref,
activity.user_data_id));
}
}
GlobalActivityAnalyzer::GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator)
: allocator_(std::move(allocator)),
analysis_stamp_(0LL),
allocator_iterator_(allocator_.get()) {
DCHECK(allocator_);
}
GlobalActivityAnalyzer::~GlobalActivityAnalyzer() = default;
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator) {
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
LogAnalyzerCreationError(kPmaUninitialized);
return nullptr;
}
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_DELETED) {
LogAnalyzerCreationError(kPmaDeleted);
return nullptr;
}
if (allocator->IsCorrupt()) {
LogAnalyzerCreationError(kPmaCorrupt);
return nullptr;
}
return std::make_unique<GlobalActivityAnalyzer>(std::move(allocator));
}
#if !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
const FilePath& file_path) {
// Map the file read-write so it can guarantee consistency between
// the analyzer and any trackers that may still be active.
std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
if (!mmfile->Initialize(file_path, MemoryMappedFile::READ_WRITE)) {
LogAnalyzerCreationError(kInvalidMemoryMappedFile);
return nullptr;
}
if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
LogAnalyzerCreationError(kPmaBadFile);
return nullptr;
}
return CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
std::move(mmfile), 0, 0, StringPiece(), /*readonly=*/true));
}
#endif // !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping) {
if (!mapping.IsValid() ||
!ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
mapping)) {
return nullptr;
}
return CreateWithAllocator(
std::make_unique<ReadOnlySharedPersistentMemoryAllocator>(
std::move(mapping), 0, StringPiece()));
}
ProcessId GlobalActivityAnalyzer::GetFirstProcess() {
PrepareAllAnalyzers();
return GetNextProcess();
}
ProcessId GlobalActivityAnalyzer::GetNextProcess() {
if (process_ids_.empty())
return 0;
ProcessId pid = process_ids_.back();
process_ids_.pop_back();
return pid;
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer(
ProcessId pid) {
analyzers_iterator_ = analyzers_.begin();
analyzers_iterator_pid_ = pid;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
int64_t create_stamp;
if (analyzers_iterator_->second->GetProcessId(&create_stamp) == pid &&
create_stamp <= analysis_stamp_) {
return analyzers_iterator_->second.get();
}
return GetNextAnalyzer();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetNextAnalyzer() {
DCHECK(analyzers_iterator_ != analyzers_.end());
int64_t create_stamp;
do {
++analyzers_iterator_;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
} while (analyzers_iterator_->second->GetProcessId(&create_stamp) !=
analyzers_iterator_pid_ ||
create_stamp > analysis_stamp_);
return analyzers_iterator_->second.get();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
const ThreadKey& key) {
auto found = analyzers_.find(key);
if (found == analyzers_.end())
return nullptr;
return found->second.get();
}
ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
ProcessId pid,
uint32_t ref,
uint32_t id) {
ActivityUserData::Snapshot snapshot;
void* memory = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdUserDataRecord,
PersistentMemoryAllocator::kSizeAny);
if (memory) {
size_t size = allocator_->GetAllocSize(ref);
const ActivityUserData user_data(memory, size);
user_data.CreateSnapshot(&snapshot);
ProcessId process_id;
int64_t create_stamp;
if (!ActivityUserData::GetOwningProcessId(memory, &process_id,
&create_stamp) ||
process_id != pid || user_data.id() != id) {
// This allocation has been overwritten since it was created. Return an
// empty snapshot because whatever was captured is incorrect.
snapshot.clear();
}
}
return snapshot;
}
const ActivityUserData::Snapshot&
GlobalActivityAnalyzer::GetProcessDataSnapshot(ProcessId pid) {
auto iter = process_data_.find(pid);
if (iter == process_data_.end())
return GetEmptyUserDataSnapshot();
if (iter->second.create_stamp > analysis_stamp_)
return GetEmptyUserDataSnapshot();
DCHECK_EQ(pid, iter->second.process_id);
return iter->second.data;
}
std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
std::vector<std::string> messages;
PersistentMemoryAllocator::Reference ref;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
while ((ref = iter.GetNextOfType(
GlobalActivityTracker::kTypeIdGlobalLogMessage)) != 0) {
const char* message = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdGlobalLogMessage,
PersistentMemoryAllocator::kSizeAny);
if (message)
messages.push_back(message);
}
return messages;
}
std::vector<GlobalActivityTracker::ModuleInfo>
GlobalActivityAnalyzer::GetModules(ProcessId pid) {
std::vector<GlobalActivityTracker::ModuleInfo> modules;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
const GlobalActivityTracker::ModuleInfoRecord* record;
while (
(record =
iter.GetNextOfObject<GlobalActivityTracker::ModuleInfoRecord>()) !=
nullptr) {
ProcessId process_id;
int64_t create_stamp;
if (!OwningProcess::GetOwningProcessId(&record->owner, &process_id,
&create_stamp) ||
pid != process_id || create_stamp > analysis_stamp_) {
continue;
}
GlobalActivityTracker::ModuleInfo info;
if (record->DecodeTo(&info, allocator_->GetAllocSize(
allocator_->GetAsReference(record)))) {
modules.push_back(std::move(info));
}
}
return modules;
}
GlobalActivityAnalyzer::ProgramLocation
GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
// This should be implemented but it's never been a priority.
return { 0, 0 };
}
bool GlobalActivityAnalyzer::IsDataComplete() const {
DCHECK(allocator_);
return !allocator_->IsFull();
}
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
const UserDataSnapshot& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
UserDataSnapshot&& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() = default;
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Record the time when analysis started.
analysis_stamp_ = base::Time::Now().ToInternalValue();
// Fetch all the records. This will retrieve only the ones created since the
// last run, since the PMA iterator continues from where it left off.
uint32_t type;
PersistentMemoryAllocator::Reference ref;
while ((ref = allocator_iterator_.GetNext(&type)) != 0) {
switch (type) {
case GlobalActivityTracker::kTypeIdActivityTracker:
case GlobalActivityTracker::kTypeIdActivityTrackerFree:
case GlobalActivityTracker::kTypeIdProcessDataRecord:
case GlobalActivityTracker::kTypeIdProcessDataRecordFree:
case PersistentMemoryAllocator::kTypeIdTransitioning:
// Active, free, or transitioning: add it to the list of references
// for later analysis.
memory_references_.insert(ref);
break;
}
}
// Clear out any old information.
analyzers_.clear();
process_data_.clear();
process_ids_.clear();
std::set<ProcessId> seen_pids;
// Go through all the known references and create objects for them with
// snapshots of the current state.
for (PersistentMemoryAllocator::Reference memory_ref : memory_references_) {
// Get the actual data segment for the tracker. Any type will do since it
// is checked below.
void* const base = allocator_->GetAsArray<char>(
memory_ref, PersistentMemoryAllocator::kTypeIdAny,
PersistentMemoryAllocator::kSizeAny);
const size_t size = allocator_->GetAllocSize(memory_ref);
if (!base)
continue;
switch (allocator_->GetType(memory_ref)) {
case GlobalActivityTracker::kTypeIdActivityTracker: {
// Create the analyzer on the data. This will capture a snapshot of the
// tracker state. This can fail if the tracker is somehow corrupted or
// is in the process of shutting down.
std::unique_ptr<ThreadActivityAnalyzer> analyzer(
new ThreadActivityAnalyzer(base, size));
if (!analyzer->IsValid())
continue;
analyzer->AddGlobalInformation(this);
// Track PIDs.
ProcessId pid = analyzer->GetProcessId();
if (seen_pids.find(pid) == seen_pids.end()) {
process_ids_.push_back(pid);
seen_pids.insert(pid);
}
// Add this analyzer to the map of known ones, indexed by a unique thread
// identifier.
DCHECK(!base::Contains(analyzers_, analyzer->GetThreadKey()));
analyzer->allocator_reference_ = memory_ref;
analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
} break;
case GlobalActivityTracker::kTypeIdProcessDataRecord: {
// Get the PID associated with this data record.
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
DCHECK(!base::Contains(process_data_, process_id));
// Create a snapshot of the data. This can fail if the data is somehow
// corrupted or the process shut down and the memory was released.
UserDataSnapshot& snapshot = process_data_[process_id];
snapshot.process_id = process_id;
snapshot.create_stamp = create_stamp;
const ActivityUserData process_data(base, size);
if (!process_data.CreateSnapshot(&snapshot.data))
break;
// Check that nothing changed. If it did, forget what was recorded.
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
if (process_id != snapshot.process_id ||
create_stamp != snapshot.create_stamp) {
process_data_.erase(process_id);
break;
}
// Track PIDs.
if (seen_pids.find(process_id) == seen_pids.end()) {
process_ids_.push_back(process_id);
seen_pids.insert(process_id);
}
} break;
}
}
// Reverse the list of PIDs so that they get popped in the order found.
ranges::reverse(process_ids_);
}
} // namespace debug
} // namespace base
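A rough sketch of how the iteration API defined above is meant to be walked: GetFirstProcess()/GetNextProcess() enumerate processes, and GetFirstAnalyzer()/GetNextAnalyzer() enumerate the thread analyzers within each. The function name and the file-based setup below are illustrative assumptions, not code from this change:

#include <memory>

#include "base/debug/activity_analyzer.h"
#include "base/files/file_path.h"
#include "base/logging.h"

void LogTrackedThreadNames(const base::FilePath& debug_file) {
  std::unique_ptr<base::debug::GlobalActivityAnalyzer> global =
      base::debug::GlobalActivityAnalyzer::CreateWithFile(debug_file);
  if (!global)
    return;  // Missing, unreadable, or unacceptable persistent memory file.
  for (base::ProcessId pid = global->GetFirstProcess(); pid != 0;
       pid = global->GetNextProcess()) {
    for (base::debug::ThreadActivityAnalyzer* thread =
             global->GetFirstAnalyzer(pid);
         thread; thread = global->GetNextAnalyzer()) {
      LOG(INFO) << "pid " << pid << ": " << thread->GetThreadName();
    }
  }
}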

View File

@ -0,0 +1,260 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_DEBUG_ACTIVITY_ANALYZER_H_
#define BASE_DEBUG_ACTIVITY_ANALYZER_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/debug/activity_tracker.h"
#include "base/memory/shared_memory_mapping.h"
#include "build/build_config.h"
namespace base {
namespace debug {
class GlobalActivityAnalyzer;
// This class provides analysis of data captured from a ThreadActivityTracker.
// When created, it takes a snapshot of the data held by the tracker and
// makes that information available to other code.
class BASE_EXPORT ThreadActivityAnalyzer {
public:
struct BASE_EXPORT Snapshot : ThreadActivityTracker::Snapshot {
Snapshot();
~Snapshot();
// The user-data snapshot for an activity, matching the |activity_stack|
// of ThreadActivityTracker::Snapshot, if any.
std::vector<ActivityUserData::Snapshot> user_data_stack;
};
// This class provides keys that uniquely identify a thread, even across
// multiple processes.
class ThreadKey {
public:
ThreadKey(ProcessId pid, int64_t tid) : pid_(pid), tid_(tid) {}
bool operator<(const ThreadKey& rhs) const {
if (pid_ != rhs.pid_)
return pid_ < rhs.pid_;
return tid_ < rhs.tid_;
}
bool operator==(const ThreadKey& rhs) const {
return (pid_ == rhs.pid_ && tid_ == rhs.tid_);
}
private:
ProcessId pid_;
int64_t tid_;
};
// Creates an analyzer for an existing activity |tracker|. A snapshot is taken
// immediately and the tracker is not referenced again.
explicit ThreadActivityAnalyzer(const ThreadActivityTracker& tracker);
// Creates an analyzer for a block of memory currently or previously in-use
// by an activity-tracker. A snapshot is taken immediately and the memory
// is not referenced again.
ThreadActivityAnalyzer(void* base, size_t size);
// Creates an analyzer for a block of memory held within a persistent-memory
// |allocator| at the given |reference|. A snapshot is taken immediately and
// the memory is not referenced again.
ThreadActivityAnalyzer(PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference);
ThreadActivityAnalyzer(const ThreadActivityAnalyzer&) = delete;
ThreadActivityAnalyzer& operator=(const ThreadActivityAnalyzer&) = delete;
~ThreadActivityAnalyzer();
// Adds information from the global analyzer.
void AddGlobalInformation(GlobalActivityAnalyzer* global);
// Returns true iff the contained data is valid. Results from all other
// methods are undefined if this returns false.
bool IsValid() { return activity_snapshot_valid_; }
// Gets the process id and its creation stamp.
ProcessId GetProcessId(int64_t* out_stamp = nullptr) {
if (out_stamp)
*out_stamp = activity_snapshot_.create_stamp;
return activity_snapshot_.process_id;
}
// Gets the name of the thread.
const std::string& GetThreadName() {
return activity_snapshot_.thread_name;
}
// Gets the ThreadKey for this thread.
ThreadKey GetThreadKey() {
return ThreadKey(activity_snapshot_.process_id,
activity_snapshot_.thread_id);
}
const Snapshot& activity_snapshot() { return activity_snapshot_; }
private:
friend class GlobalActivityAnalyzer;
// The snapshot of the activity tracker taken at the moment of construction.
Snapshot activity_snapshot_;
// Flag indicating if the snapshot data is valid.
bool activity_snapshot_valid_;
// A reference into a persistent memory allocator, used by the global
// analyzer to know where this tracker came from.
PersistentMemoryAllocator::Reference allocator_reference_ = 0;
};
// This class manages analyzers for all known processes and threads as stored
// in a persistent memory allocator. It supports retrieval of them through
// iteration and directly using a ThreadKey, which allows for cross-references
// to be resolved.
// Note that although each individual snapshot is taken atomically, the
// snapshots are not all taken at the same instant, so the combined view may
// show small inconsistencies between threads if attempted on a live system.
class BASE_EXPORT GlobalActivityAnalyzer {
public:
struct ProgramLocation {
int module;
uintptr_t offset;
};
using ThreadKey = ThreadActivityAnalyzer::ThreadKey;
// Creates a global analyzer from a persistent memory allocator.
explicit GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator);
GlobalActivityAnalyzer(const GlobalActivityAnalyzer&) = delete;
GlobalActivityAnalyzer& operator=(const GlobalActivityAnalyzer&) = delete;
~GlobalActivityAnalyzer();
// Creates a global analyzer using a given persistent-memory |allocator|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator);
#if !BUILDFLAG(IS_NACL)
// Creates a global analyzer using the contents of a file given in
// |file_path|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithFile(
const FilePath& file_path);
#endif // !BUILDFLAG(IS_NACL)
// Like above but accesses an allocator in a mapped shared-memory segment.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping);
// Iterates over all known valid processes and returns their PIDs or zero
// if there are no more. Calls to GetFirstProcess() will perform a global
// snapshot in order to provide a relatively consistent state across the
// future calls to GetNextProcess() and GetFirst/NextAnalyzer(). PIDs are
// returned in the order they're found meaning that a first-launched
// controlling process will be found first. Note, however, that space
// freed by an exiting process may be re-used by a later process.
ProcessId GetFirstProcess();
ProcessId GetNextProcess();
// Iterates over all known valid analyzers for a given process or returns
// null if there are no more.
//
// GetFirstProcess() must be called first in order to capture a global
// snapshot! Ownership stays with the global analyzer object and all existing
// analyzer pointers are invalidated when GetFirstProcess() is called.
ThreadActivityAnalyzer* GetFirstAnalyzer(ProcessId pid);
ThreadActivityAnalyzer* GetNextAnalyzer();
// Gets the analyzer for a specific thread or null if there is none.
// Ownership stays with the global analyzer object.
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
// Extract user data based on a reference and its identifier.
ActivityUserData::Snapshot GetUserDataSnapshot(ProcessId pid,
uint32_t ref,
uint32_t id);
// Extract the data for a specific process. An empty snapshot will be
// returned if the process is not known.
const ActivityUserData::Snapshot& GetProcessDataSnapshot(ProcessId pid);
// Gets all log messages stored within.
std::vector<std::string> GetLogMessages();
// Gets modules corresponding to a pid. This pid must come from a call to
// GetFirst/NextProcess. Only modules that were first registered prior to
// GetFirstProcess's snapshot are returned.
std::vector<GlobalActivityTracker::ModuleInfo> GetModules(ProcessId pid);
// Gets the corresponding "program location" for a given "program counter".
// This will return {0,0} if no mapping could be found.
ProgramLocation GetProgramLocationFromAddress(uint64_t address);
// Returns whether the data is complete. Data can be incomplete if the
// recording size quota is hit.
bool IsDataComplete() const;
private:
using AnalyzerMap =
std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
struct UserDataSnapshot {
// Complex class needs out-of-line ctor/dtor.
UserDataSnapshot();
UserDataSnapshot(const UserDataSnapshot& rhs);
UserDataSnapshot(UserDataSnapshot&& rhs);
~UserDataSnapshot();
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::Snapshot data;
};
// Finds, creates, and indexes analyzers for all known processes and threads.
void PrepareAllAnalyzers();
// The persistent memory allocator holding all tracking data.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
// The time stamp when analysis began. This is used to prevent looking into
// process IDs that get reused when analyzing a live system.
int64_t analysis_stamp_;
// The iterator for finding tracking information in the allocator.
PersistentMemoryAllocator::Iterator allocator_iterator_;
// A set of all interesting memory references found within the allocator.
std::set<PersistentMemoryAllocator::Reference> memory_references_;
// Snapshots of per-process user data, keyed by process ID, extracted from
// the allocator.
std::map<ProcessId, UserDataSnapshot> process_data_;
// A set of all process IDs collected during PrepareAllAnalyzers. These are
// popped and returned one-by-one with calls to GetFirst/NextProcess().
std::vector<ProcessId> process_ids_;
// A map, keyed by ThreadKey, of all valid activity analyzers.
AnalyzerMap analyzers_;
// The iterator within the analyzers_ map for returning analyzers through
// first/next iteration.
AnalyzerMap::iterator analyzers_iterator_;
ProcessId analyzers_iterator_pid_;
};
} // namespace debug
} // namespace base
#endif // BASE_DEBUG_ACTIVITY_ANALYZER_H_
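
A hedged usage sketch for the API declared above: it assumes an activity-tracker file already exists on disk and uses only methods from this header (CreateWithFile, GetFirstProcess/GetNextProcess, GetFirstAnalyzer/GetNextAnalyzer, GetThreadName). The function name and logging are illustrative, not part of the tree.

#include <memory>

#include "base/debug/activity_analyzer.h"
#include "base/files/file_path.h"
#include "base/logging.h"

// Walks every process and thread captured in |tracker_file| and logs the
// thread names. Analyzer pointers stay owned by |global| and are invalidated
// by the next call to GetFirstProcess(), per the comments above.
void DumpActivity(const base::FilePath& tracker_file) {
  std::unique_ptr<base::debug::GlobalActivityAnalyzer> global =
      base::debug::GlobalActivityAnalyzer::CreateWithFile(tracker_file);
  if (!global)
    return;

  for (base::ProcessId pid = global->GetFirstProcess(); pid != 0;
       pid = global->GetNextProcess()) {
    for (base::debug::ThreadActivityAnalyzer* thread =
             global->GetFirstAnalyzer(pid);
         thread != nullptr; thread = global->GetNextAnalyzer()) {
      LOG(INFO) << "pid " << pid << " thread " << thread->GetThreadName();
    }
  }
}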

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -14,7 +14,6 @@
#include <string.h> #include <string.h>
#include <sys/param.h> #include <sys/param.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h> #include <sys/types.h>
#include <unistd.h> #include <unistd.h>
@ -301,27 +300,6 @@ void PrintToStderr(const char* output) {
std::ignore = HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))); std::ignore = HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output)));
} }
#if BUILDFLAG(IS_LINUX)
void AlarmSignalHandler(int signal, siginfo_t* info, void* void_context) {
// We have seen rare cases on AMD linux where the default signal handler
// either does not run or a thread (Probably an AMD driver thread) prevents
// the termination of the gpu process. We catch this case when the alarm fires
// and then call exit_group() to kill all threads of the process. This has
// resolved the zombie gpu process issues we have seen on our context lost
// test.
// Note that many different calls were tried to kill the process when it is in
// this state. Only 'exit_group' was found to cause termination and it is
// speculated that only this works because only this exit kills all threads in
// the process (not simply the current thread).
// See: http://crbug.com/1396451.
PrintToStderr(
"Warning: Default signal handler failed to terminate process.\n");
PrintToStderr("Calling exit_group() directly to prevent timeout.\n");
// See: https://man7.org/linux/man-pages/man2/exit_group.2.html
syscall(SYS_exit_group, EXIT_FAILURE);
}
#endif // BUILDFLAG(IS_LINUX)
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) { void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
// NOTE: This code MUST be async-signal safe. // NOTE: This code MUST be async-signal safe.
// NO malloc or stdio is allowed here. // NO malloc or stdio is allowed here.
@ -542,27 +520,11 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
PrintToStderr( PrintToStderr(
"Calling _exit(EXIT_FAILURE). Core file will not be generated.\n"); "Calling _exit(EXIT_FAILURE). Core file will not be generated.\n");
_exit(EXIT_FAILURE); _exit(EXIT_FAILURE);
#else // BUILDFLAG(IS_LINUX) #endif // !BUILDFLAG(IS_LINUX)
// After leaving this handler control flow returns to the point where the // After leaving this handler control flow returns to the point where the
// signal was raised, raising the current signal once again but executing the // signal was raised, raising the current signal once again but executing the
// default handler instead of this one. // default handler instead of this one.
// Set an alarm to trigger in case the default handler does not terminate
// the process. See 'AlarmSignalHandler' for more details.
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_flags = static_cast<int>(SA_RESETHAND);
action.sa_sigaction = &AlarmSignalHandler;
sigemptyset(&action.sa_mask);
sigaction(SIGALRM, &action, nullptr);
// 'alarm' function is signal handler safe.
// https://man7.org/linux/man-pages/man7/signal-safety.7.html
// This delay is set to be long enough for the real signal handler to fire but
// shorter than chrome's process watchdog timer.
constexpr unsigned int kAlarmSignalDelaySeconds = 5;
alarm(kAlarmSignalDelaySeconds);
#endif // !BUILDFLAG(IS_LINUX)
} }
class PrintBacktraceOutputHandler : public BacktraceOutputHandler { class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
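
The hunk above removes a Linux-only fallback whose intent is easier to see outside the side-by-side rendering: before control returns and the fatal signal is re-raised for the default handler, a one-shot SIGALRM is armed; if the default handler fails to terminate the process, the alarm handler calls exit_group() to kill every thread. The sketch below restates that pattern in isolation; it is not the Chromium code, and the function names are invented.

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

namespace {

// Async-signal-safe: only syscall() is used. exit_group() terminates every
// thread in the process, which is what the removed code relied on.
void AlarmHandler(int /*signal*/, siginfo_t* /*info*/, void* /*context*/) {
  syscall(SYS_exit_group, EXIT_FAILURE);
}

// Call at the end of a fatal-signal handler, just before returning and
// letting the signal be re-raised for the default handler.
void ArmTerminationFallback() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  action.sa_flags = SA_SIGINFO | SA_RESETHAND;  // One-shot handler.
  action.sa_sigaction = &AlarmHandler;
  sigemptyset(&action.sa_mask);
  sigaction(SIGALRM, &action, nullptr);

  // Long enough for the default handler to run, shorter than any watchdog.
  alarm(5);
}

}  // namespace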

View File

@ -668,12 +668,7 @@ FeatureList::OverrideState FeatureList::GetOverrideState(
const Feature& feature) const { const Feature& feature) const {
DCHECK(initialized_); DCHECK(initialized_);
DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name; DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
DCHECK(CheckFeatureIdentity(feature)) DCHECK(CheckFeatureIdentity(feature)) << feature.name;
<< feature.name
<< " has multiple definitions. Either it is defined more than once in "
"code or (for component builds) the code is built into multiple "
"components (shared libraries) without a corresponding export "
"statement";
// If caching is disabled, always perform the full lookup. // If caching is disabled, always perform the full lookup.
if (!g_cache_override_state) if (!g_cache_override_state)

View File

@ -13,7 +13,7 @@ namespace base::features {
// backed by executable files. // backed by executable files.
BASE_FEATURE(kEnforceNoExecutableFileHandles, BASE_FEATURE(kEnforceNoExecutableFileHandles,
"EnforceNoExecutableFileHandles", "EnforceNoExecutableFileHandles",
FEATURE_ENABLED_BY_DEFAULT); FEATURE_DISABLED_BY_DEFAULT);
// Optimizes parsing and loading of data: URLs. // Optimizes parsing and loading of data: URLs.
BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT); BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT);
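
For context on how a flag defined with BASE_FEATURE above is consumed, a hedged sketch follows; the wrapper function is invented and the header path is assumed to be base/features.h, but base::FeatureList::IsEnabled() is the standard query path and respects command-line and field-trial overrides.

#include "base/feature_list.h"
#include "base/features.h"

// Returns the effective state of the feature toggled in the hunk above.
bool ShouldEnforceNoExecutableFileHandles() {
  return base::FeatureList::IsEnabled(
      base::features::kEnforceNoExecutableFileHandles);
}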

View File

@ -75,7 +75,9 @@ class BASE_EXPORT FileDescriptorWatcher {
// Controller is deleted, ownership of |watcher_| is transferred to a delete // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
// task posted to the MessageLoopForIO. This ensures that |watcher_| isn't // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
// deleted while it is being used by the MessageLoopForIO. // deleted while it is being used by the MessageLoopForIO.
raw_ptr<Watcher, DanglingUntriaged> watcher_; //
// TODO(crbug.com/1298696): Breaks base_unittests.
raw_ptr<Watcher, DanglingUntriagedDegradeToNoOpWhenMTE> watcher_;
// An event for the watcher to notify controller that it's destroyed. // An event for the watcher to notify controller that it's destroyed.
// As the |watcher_| is owned by Controller, always outlives the Watcher. // As the |watcher_| is owned by Controller, always outlives the Watcher.

View File

@ -350,9 +350,9 @@ class BASE_EXPORT FilePath {
// Returns a FilePath by appending a separator and the supplied path // Returns a FilePath by appending a separator and the supplied path
// component to this object's path. Append takes care to avoid adding // component to this object's path. Append takes care to avoid adding
// excessive separators if this object's path already ends with a separator. // excessive separators if this object's path already ends with a separator.
// If this object's path is kCurrentDirectory ('.'), a new FilePath // If this object's path is kCurrentDirectory, a new FilePath corresponding
// corresponding only to |component| is returned. |component| must be a // only to |component| is returned. |component| must be a relative path;
// relative path; it is an error to pass an absolute path. // it is an error to pass an absolute path.
[[nodiscard]] FilePath Append(StringPieceType component) const; [[nodiscard]] FilePath Append(StringPieceType component) const;
[[nodiscard]] FilePath Append(const FilePath& component) const; [[nodiscard]] FilePath Append(const FilePath& component) const;
[[nodiscard]] FilePath Append(const SafeBaseName& component) const; [[nodiscard]] FilePath Append(const SafeBaseName& component) const;
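
A small hedged illustration of the Append() behavior described in the comment above: separators are not doubled, and appending to kCurrentDirectory yields just the component. The paths and the function name are illustrative.

#include "base/files/file_path.h"

void AppendExamples() {
  base::FilePath dir(FILE_PATH_LITERAL("/data/logs/"));
  // No doubled separator even though |dir| already ends with one:
  base::FilePath log = dir.Append(FILE_PATH_LITERAL("net.log"));
  // |log| is "/data/logs/net.log".

  base::FilePath cwd(base::FilePath::kCurrentDirectory);  // "."
  base::FilePath rel = cwd.Append(FILE_PATH_LITERAL("net.log"));
  // |rel| is "net.log", not "./net.log", per the kCurrentDirectory rule.
}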

View File

@ -46,11 +46,11 @@ class BASE_EXPORT FilePathWatcher {
// within the directory are watched. // within the directory are watched.
kRecursive, kRecursive,
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_MAC)
// Indicates that the watcher should watch the given path only (neither // Indicates that the watcher should watch the given path only (neither
// ancestors nor descendants). The watch fails if the path does not exist. // ancestors nor descendants). The watch fails if the path does not exist.
kTrivial, kTrivial,
#endif // BUILDFLAG(IS_APPLE) #endif // BUILDFLAG(IS_MAC)
}; };
// Flags are a generalization of |Type|. They are used in the new // Flags are a generalization of |Type|. They are used in the new

View File

@ -390,24 +390,14 @@ bool IsPathSafeToSetAclOn(const FilePath& path) {
if (g_extra_allowed_path_for_no_execute) { if (g_extra_allowed_path_for_no_execute) {
valid_paths.push_back(g_extra_allowed_path_for_no_execute); valid_paths.push_back(g_extra_allowed_path_for_no_execute);
} }
// MakeLongFilePath is needed here because temp files can have an 8.3 path
// under certain conditions. See comments in base::MakeLongFilePath.
base::FilePath long_path = base::MakeLongFilePath(path);
DCHECK(!long_path.empty()) << "Cannot get long path for " << path;
for (const auto path_type : valid_paths) { for (const auto path_type : valid_paths) {
base::FilePath valid_path; base::FilePath valid_path;
if (!base::PathService::Get(path_type, &valid_path)) { if (base::PathService::Get(path_type, &valid_path)) {
DLOG(FATAL) << "Cannot get path for pathservice key " << path_type; // Temp files can sometimes have an 8.3 path. See comments in
continue; // base::MakeLongFilePath.
} if (base::MakeLongFilePath(valid_path).IsParent(path)) {
// Temp files can sometimes have an 8.3 path. See comments in return true;
// base::MakeLongFilePath. }
base::FilePath full_path = base::MakeLongFilePath(valid_path);
DCHECK(!full_path.empty()) << "Cannot get long path for " << valid_path;
if (full_path.IsParent(long_path)) {
return true;
} }
} }
return false; return false;
@ -1110,7 +1100,9 @@ bool PreventExecuteMapping(const FilePath& path) {
return true; return true;
} }
bool is_path_safe = IsPathSafeToSetAclOn(path); // MakeLongFilePath is needed here because temp files can have an 8.3 path
// under certain conditions. See comments in base::MakeLongFilePath.
bool is_path_safe = IsPathSafeToSetAclOn(base::MakeLongFilePath(path));
if (!is_path_safe) { if (!is_path_safe) {
// To mitigate the effect of past OS bugs where attackers are able to use // To mitigate the effect of past OS bugs where attackers are able to use
@ -1145,7 +1137,7 @@ bool PreventExecuteMapping(const FilePath& path) {
// dangerous path is being passed to a renderer, which is inherently unsafe. // dangerous path is being passed to a renderer, which is inherently unsafe.
// //
// If this check hits, please do not ignore it but consult security team. // If this check hits, please do not ignore it but consult security team.
DLOG(FATAL) << "Unsafe to deny execute access to path : " << path; NOTREACHED() << "Unsafe to deny execute access to path : " << path;
return false; return false;
} }
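
The hunk above reshuffles a parent-directory check whose core idea is: expand both the candidate path and each allowed base directory to their long form (8.3 short names would otherwise defeat a prefix comparison), then require the base to be a parent of the candidate. Below is a hedged, Windows-only sketch of that check, using MakeLongFilePath, PathService::Get and FilePath::IsParent as they appear in the hunk; the function name and the |allowed_keys| parameter are invented.

#include <vector>

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/path_service.h"

// Returns true if |path| lives under any directory named by |allowed_keys|
// (PathService keys). Both sides are expanded with MakeLongFilePath so that
// 8.3 short names cannot bypass the comparison.
bool IsUnderAllowedDirectory(const base::FilePath& path,
                             const std::vector<int>& allowed_keys) {
  const base::FilePath long_path = base::MakeLongFilePath(path);

  for (int key : allowed_keys) {
    base::FilePath base_dir;
    if (!base::PathService::Get(key, &base_dir))
      continue;
    if (base::MakeLongFilePath(base_dir).IsParent(long_path))
      return true;
  }
  return false;
}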

View File

@ -457,16 +457,6 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE)) else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
created_ = true; created_ = true;
if (flags & FLAG_WIN_NO_EXECUTE) { if (flags & FLAG_WIN_NO_EXECUTE) {
// These two DCHECKs make sure that no callers are trying to remove
// execute permission from a file that might need to be mapped executable
// later. If they hit in code then the file should not have
// FLAG_WIN_NO_EXECUTE flag, but this will mean that the file cannot be
// passed to renderers.
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".exe"),
path.Extension()));
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".dll"),
path.Extension()));
// It is possible that the ACE could not be added if the file was created // It is possible that the ACE could not be added if the file was created
// in a path for which the caller does not have WRITE_DAC access. In this // in a path for which the caller does not have WRITE_DAC access. In this
// case, ignore the error since if this is occurring then it's likely the // case, ignore the error since if this is occurring then it's likely the

Some files were not shown because too many files have changed in this diff