Compare commits

...

No commits in common. "12a6ba324f8bc147f3f7ba299e9264e68d2d1a25" and "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" have entirely different histories.

1793 changed files with 28699 additions and 43316 deletions


@@ -1 +1 @@
111.0.5563.64
112.0.5615.49


@@ -126,6 +126,7 @@ Anton Obzhirov <a.obzhirov@samsung.com>
Antonin Hildebrand <antonin.hildebrand@gmail.com>
Antonio Gomes <a1.gomes@sisa.samsung.com>
Anuj Kumar Sharma <anujk.sharma@samsung.com>
Ao Hui <aohui.wan@gmail.com>
Ao Sun <ntusunao@gmail.com>
Ao Wang <wangao.james@bytedance.com>
Aquibuzzaman Md. Sayem <md.sayem@samsung.com>
@@ -277,6 +278,7 @@ Daniel Waxweiler <daniel.waxweiler@gmail.com>
Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com>
Danny Weiss <danny.weiss.fr@gmail.com>
Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com>
Darshan Sen <raisinten@gmail.com>
@@ -376,6 +378,7 @@ Feifei Wang <alexswang@tencent.com>
Felipe Erias Morandeira <felipeerias@gmail.com>
Felix H. Dahlke <fhd@ubercode.de>
Felix Weilbach <feweilbach@gmail.com>
Feng Shengyuan <fengshengyuan@agora.io>
Feng Yu <f3n67u@gmail.com>
Fengrong Fang <fr.fang@samsung.com>
Fernando Jiménez Moreno <ferjmoreno@gmail.com>
@@ -605,6 +608,7 @@ John Kleinschmidt <kleinschmidtorama@gmail.com>
John Yani <vanuan@gmail.com>
John Yoo <nearbyh13@gmail.com>
Johnson Lin <johnson.lin@intel.com>
Jon Jensen <jonj@netflix.com>
Jonathan Frazer <listedegarde@gmail.com>
Jonathan Garbee <jonathan@garbee.me>
Jonathan Hacker <jhacker@arcanefour.com>
@@ -640,6 +644,7 @@ Julien Isorce <j.isorce@samsung.com>
Julien Racle <jracle@logitech.com>
Jun Fang <jun_fang@foxitsoftware.com>
Jun Jiang <jun.a.jiang@intel.com>
Junbong Eom <jb.eom@samsung.com>
Jungchang Park <valley84265@gmail.com>
Junchao Han <junchao.han@intel.com>
Junghoon Lee <sjh836@gmail.com>
@@ -740,6 +745,7 @@ Li Yanbo <liyanbo.monster@bytedance.com>
Li Yin <li.yin@intel.com>
Lidwine Genevet <lgenevet@cisco.com>
Lin Sun <lin.sun@intel.com>
Lin Peng <penglin220@gmail.com>
Lin Peng <penglin22@huawei.com>
Lingqi Chi <someway.bit@gmail.com>
Lingyun Cai <lingyun.cai@intel.com>
@@ -762,7 +768,7 @@ Luke Seunghoe Gu <gulukesh@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Luoxi Pan <l.panpax@gmail.com>
Lu Yahan <yahan@iscas.ac.cn>
Ma Aiguo <maaiguo@uniontech.com>
Ma Aiguo <imaiguo@gmail.com>
Maarten Lankhorst <m.b.lankhorst@gmail.com>
Maciej Pawlowski <m.pawlowski@eyeo.com>
Magnus Danielsson <fuzzac@gmail.com>
@@ -915,6 +921,7 @@ Noj Vek <nojvek@gmail.com>
Nolan Cao <nolan.robin.cao@gmail.com>
Oleksii Kadurin <ovkadurin@gmail.com>
Oliver Dunk <oliver@oliverdunk.com>
Olivier Tilloy <olivier+chromium@tilloy.net>
Olli Raula (Old name Olli Syrjälä) <olli.raula@intel.com>
Omar Sandoval <osandov@osandov.com>
Owen Yuwono <owenyuwono@gmail.com>
@@ -1199,6 +1206,7 @@ Suyash Nayan <suyashnyn1@gmail.com>
Suyash Sengar <suyash.s@samsung.com>
Swarali Raut <swarali.sr@samsung.com>
Swati Jaiswal <swa.jaiswal@samsung.com>
Syed Wajid <syed.wajid@samsung.com>
Sylvain Zimmer <sylvinus@gmail.com>
Sylvestre Ledru <sylvestre.ledru@gmail.com>
Synthia Islam <synthia.is@samsung.com>

src/DEPS

@@ -229,7 +229,7 @@ vars = {
#
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '15326.0.0',
'lacros_sdk_version': '15357.0.0',
# Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates
@@ -241,7 +241,7 @@ vars = {
# luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:221383f749a2c5b8587449d3d2e4982857daa9e7',
'luci_go': 'git_revision:8a8b4f2ea65c7ff5fde8a0c522008aed78d42d9d',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision.
@@ -286,11 +286,6 @@ vars = {
# Rust toolchain.
'checkout_rust_toolchain_deps': False,
# The Rust toolchain sources. It is a version tag from an instance of the
# CIPD `chromium/third_party/rust_src` package. This field is written by
# //tools/clang/scripts/upload_revision.py and shouldn't be updated by hand.
'rust_toolchain_version': 'version:2@2022-12-09',
'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com',
'boringssl_git': 'https://boringssl.googlesource.com',
@@ -304,34 +299,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': '59932b057f281ddaeb0926ecfac55486270f8c51',
'skia_revision': 'f5fefe5245098be43cb608eace5e14d67cdc09e6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': '1cee747760b14aa78503a22ba1a3ab97b968fa28',
'v8_revision': '96fed67922e5f54a027aed80259e5083769e33e2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': 'cd45d155bf4cf7404061f37e974a048914ca4610',
'angle_revision': '293db5ce4d0766cb3ba7711057a00f0a5bddb00d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': 'aae98adc2222dcada4aa952cccad48ab08e34004',
'swiftshader_revision': '3575b5479af54e471ea6750a8585e2c9bc87801c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': 'd087df316170b2d8757487b1015244384624478e',
'pdfium_revision': '4090d4c0f9873f5f50b630c26c2439b5297a6e49',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '45b8d7bbd771cbf7e116db2ba1f1cc7af959497e',
'boringssl_revision': 'ca1690e221677cea3fb946f324eb89d846ec53f2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:11.20230126.1.1',
'fuchsia_version': 'version:11.20230223.1.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@@ -351,11 +346,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other.
'nacl_revision': '5b530a9ffd34be8541037262cf47cf3924bfc973',
'nacl_revision': '417b413dbe94a861ee050d42daf945cca02dba11',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': '82ce172669f132839debe6e50a3a53fe88db9e31',
'freetype_revision': '3f01161ff22c84c371b6dc3b5e0351e0d6e8e771',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
@@ -375,15 +370,23 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': 'abaad53f0c4e104ab630b314af2902ad83b82c8c',
'catapult_revision': '4274fe29dac91b7713244daebb6f1d2364d97193',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling CrossBench
# and whatever else without interference from each other.
'crossbench_revision': '27639d495e1cec411073bc82ba1fe368ce0ca89a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
'libfuzzer_revision': 'debe7d2d1982e540fbd6bd78604bf001753f9e74',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling centipede
# and whatever else without interference from each other.
'centipede_revision': '63ed43d2bfa2c04e291e81d643a5581c9be90c53',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': 'f0bf0ece4aae3192fa2f0f2859f668cb343791be',
'devtools_frontend_revision': '2436ae2c5444ba8008a9f092301209a87aef0483',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
@@ -411,6 +414,10 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
'android_sdk_platforms_preview_version': 'YWMYkzyxGBgVsty0GhXL1oxbY0pGXQIgFc0Rh7ZMRPYC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version
@@ -419,11 +426,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': 'e5193f1ffd232ebf7adfd403114edde08d162663',
'dawn_revision': 'de24841411c4cfb13662238327f2e456c82d26f6',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': '566b33c9fa5b1723db04be3d40dcaf102344c323',
'quiche_revision': '40c87d454d762f3daaeca334cd2dc962f0476b13',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@@ -443,7 +450,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': '26973fada5175060db140d7e1157cce6b604dc6a',
'nearby_revision': 'd260feced56cfdea53f941883c250d635ed6064d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@@ -455,19 +462,19 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'cros_components_revision': '5e449ecf7311cba83cdcfc1b2ae449f40d22f29e',
'cros_components_revision': 'fb2448fc618b4e3634c8c4097b4a84fcfa34c540',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'resultdb_version': 'git_revision:ac21cf9883af7d1bf6d60e8a7448918eb3b6aa18',
'resultdb_version': 'git_revision:ebc74d10fa0d64057daa6f128e89f3672eeeec95',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': 'd520d582aa710cc0a4635620c02c5dbc187deb4f',
'libcxxabi_revision': '87d8fe050bedb143f232b9ff99a0a46897640e5d',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': 'e95b94b74d26f8387d4fb03a687a2fab0ed8e91c',
'libunwind_revision': 'c5e861c7b48ee121d3719b7b5c14175c47ec5cc9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@@ -475,18 +482,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'highway_revision': '8ae5b88670fb918f815b717c7c13d38a9b0eb4bb',
'highway_revision': '58746ca5b9f9444a2a3549704602ecc6239f8f41',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': 'a249b21db6516234e5456716ae074fbb00176b3f',
'ffmpeg_revision': 'ee0c52d52036ecadfd38affec86c04937480bedb',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': '1127c78cf90cf253be614a1e1d3645da57edbeb4',
'libcxx_revision': 'e136ec5032a5e5d97e988ce66e8c269a80ff54c4',
# GN CIPD package version.
'gn_version': 'git_revision:5e19d2fb166fbd4f6f32147fbb2f497091a54ad8',
'gn_version': 'git_revision:b25a2f8c2d33f02082f0f258350f5e22c0973108',
# ninja CIPD package version.
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
@@ -579,19 +586,6 @@ deps = {
'condition': 'host_os == "linux"',
},
# Rust sources are used to build the Rust standard library, and on toolchain
# build bots, to build the Rust toolchain.
'src/third_party/rust_src/src': {
'packages': [
{
'package': 'chromium/third_party/rust_src',
'version': Var('rust_toolchain_version'),
},
],
'dep_type': 'cipd',
'condition': 'checkout_rust_toolchain_deps or use_rust',
},
# We don't know target_cpu at deps time. At least until there's a universal
# binary of httpd-php, pull both intel and arm versions in DEPS and then pick
# the right one at runtime.
@@ -631,7 +625,7 @@ deps = {
'packages': [
{
'package': 'chromium/android_webview/tools/cts_archive',
'version': 'C4m9-gZKvvtS0-KQ9cMRi6wEfZH_TeWSiXEgsgtGtsMC',
'version': 'APYMYksv9eNAkU6osFvNXN38pMO1Q1kScjeecePr7NgC',
},
],
'condition': 'checkout_android',
@@ -654,7 +648,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': '1c74wqdugvZKRLfu6oY7wkYQ_VpIAObl_7TDwLQw8w4C',
'version': 'u1XGTm7703jO-Ax33P8j-x_KijOeHd36aBA6SO8V3a8C',
},
],
}
@@ -665,7 +659,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal_prod',
'version': 'aFiR8jLJBXsy6aYQhQp8cd9yBEmqa_cJZwx0ltJbKT4C',
'version': 'qDhUmRj82uhWqE2eVqt12e1eJKWKgRpRjgQrBSrdyP4C',
},
],
},
@@ -676,7 +670,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'G5yIDMjjCL2TyjU-EmLubZkkb1sLM0XdZ5zB1XmviQkC',
'version': 'RmZn_R0BOPSbruD15DEq9pfu5IhhtjoJX6z-ufrWnD4C',
},
],
},
@@ -687,7 +681,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'LHw1kjfI3H94qB22t7YsgnMQyXBBgxCgtub5F-GxSCEC',
'version': 'AAes70A2b8-CLhU1h9Sh1x2K-N3WjG7j2Tlp6VOgmnQC',
},
],
},
@@ -699,7 +693,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': 'Rd4fkayO0xqiXgM-WjFwSTh1YECDXF5ZfcSLW_odlz0C',
'version': '5ui7_fqpvI7a8omWqM8iyD0PZFPJpYKoMHkAb6xA_TkC',
},
],
},
@@ -711,7 +705,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'UoyUoEkFS37BkNrD1mNVfqdDlPujDLGwaGBdWz7bPakC',
'version': '0KnizXQ2_n_V3aEHR240LqMKw7b0-Pm9VBUmVuQh0cAC',
},
],
},
@@ -722,7 +716,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'Sl2g34_swdY9lIDQA6pTzPSTM5tec284DtwYekj9BIwC',
'version': 'g_24x4tVzQIoveectPGIXIGc2ptYDTnOodXieF_OG_4C',
},
],
},
@@ -733,18 +727,18 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'vnZtwC2H42TkFgDMIYizwAUGFXTMOm00-yjnB6gnXigC',
'version': 's6U9lpJZsILIKkP5bCc_QqvJjPW2MwMWg5IoCBt_YEYC',
},
],
},
'src/chrome/test/data/autofill/captured_sites': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + '58a7920c173397b57d8d7be95cb93c2b43d02e26',
'src/chrome/test/data/autofill/captured_sites/artifacts': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/autofill.git' + '@' + 'a38d810c87a18582e986b94650d9cfa4b67be12c',
'condition': 'checkout_chromium_autofill_test_dependencies',
},
'src/chrome/test/data/password/captured_sites': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '93dfc8b7199a285a2ed832e607b0e68c5544273a',
'src/chrome/test/data/password/captured_sites/artifacts': {
'url': 'https://chrome-internal.googlesource.com/chrome/test/captured_sites/password.git' + '@' + '04b3ea663adf745c52831650e2426b54bc94e65d',
'condition': 'checkout_chromium_password_manager_test_dependencies',
},
@@ -770,21 +764,21 @@ deps = {
'src/clank': {
'url': 'https://chrome-internal.googlesource.com/clank/internal/apps.git' + '@' +
'7d4e93f3d1693f1dfe471527e93a8e729ce149a3',
'd83811f32343245218e742319cec89aaefb94657',
'condition': 'checkout_android and checkout_src_internal',
},
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '7e351332addd1fca691bb524c976a56f17e3eb95',
'url': Var('chromium_git') + '/website.git' + '@' + '182a630499559e1403237d14e2bc6302d384ed39',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '99ba3b6ed7b8489899f06a0d602e84fc657e8338',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '8ac47627cb9ba09bf4bc3489b7aff5d77cd6eb88',
'condition': 'checkout_ios',
},
'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'd4d6f7da76f34b87b7b953176ef9e08eda83afb1',
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '51058a369f943064dc6db4f38dca32263d584ea5',
'condition': 'checkout_ios',
},
@@ -799,7 +793,7 @@ deps = {
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '2cdac2db582f6067b014aa66a3846588352361a1',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + 'a407ef3ac3220882732e701804613c44704ebf78',
'condition': 'checkout_ios',
},
@@ -869,7 +863,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'bCwganuATTWjTe2ahjxynn8rnTuSw900jtLOYmi3kV4C',
'version': 'E7vzLhZk6xAJnnioidm0-d5a4cz1OxOr0LJUsCkKKJ0C',
},
],
'dep_type': 'cipd',
@@ -880,7 +874,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'ms15aJhiLzrF61zOZxq2jcESbsF3FFYtCS8R290t8JsC',
'version': '4wYh3p2y6ATe5OeiGmtl-G9thdrKGoX5DHzaP8V_tecC',
},
],
'dep_type': 'cipd',
@@ -891,7 +885,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'xILgcx3FOZDE8fCy3EXmw76GOIrdmlVb3aQ5dUSq8x8C',
'version': 'E5Y3kcrVZt1PybXoGxTDRb_KmswZX_5jBphOaHwm2fQC',
},
],
'dep_type': 'cipd',
@@ -959,7 +953,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': '8KUWsjmvRQJlzdaro14SgP8nok3-kHY2h00BEjXLJqQC',
'version': 'zEg6hTXAR6Mcqyfh8gHDzD9fmyy1xwz4swj6pkENIYsC',
},
],
'condition': 'checkout_android',
@@ -997,7 +991,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/aapt2',
'version': 'cbNG7g8Sinh-lsT8hWsU-RyXqLT_uh4jIb1fjCdhrzIC',
'version': '36NqCian2RIwuM6SFfizdUgKoXyZhy3q6pFfsws0szYC',
},
],
'condition': 'checkout_android',
@@ -1019,7 +1013,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'XIPSJgFHEHN1ogOJqWVktlbl8PTfLZdNf_G2h4GcnrYC',
'version': 'TpDdbF-PPgwL0iOVsdLM07L-DUp2DV3hgzCMmPd2_GUC',
},
],
'condition': 'checkout_android',
@@ -1030,7 +1024,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'EPj7vnLteKz9kMQ6x4ZPaM5E20Bt4I0wTdrxdBpruZMC',
'version': 'MSpv-kFDDSPO0SY0dLdHegUJcJT1Yy8cL9r3vlAZ9vkC',
},
],
'condition': 'checkout_android',
@@ -1041,7 +1035,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': '5Zw4RYBL86koJro2O-jjcZYxOOdEW-hJDYykae8efQAC',
'version': 'EbRaK62t9grqlZqL-JTd_zwM4t1u9fm1x4c2rLE0cqQC',
},
],
'condition': 'checkout_android',
@@ -1075,6 +1069,10 @@ deps = {
'package': 'chromium/third_party/android_sdk/public/platforms/android-33',
'version': Var('android_sdk_platforms_version'),
},
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-tiramisuprivacysandbox',
'version': Var('android_sdk_platforms_preview_version'),
},
{
'package': 'chromium/third_party/android_sdk/public/sources/android-31',
'version': Var('android_sdk_sources_version'),
@@ -1122,7 +1120,7 @@ deps = {
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + '79326ebe9446add03e76b4422ff8036e812224d2',
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'abb105db21e962eda5b7d9b7a0ac8dd701e0b987',
'src/third_party/byte_buddy': {
'packages': [
@@ -1147,7 +1145,7 @@ },
},
'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + 'f4628fda1b370eb238ae69545024d256ca62d719',
Var('chromium_git') + '/cast_core/public' + '@' + 'e42ef68aa05ac0c163805f60b9b19284f3c2dee3',
'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@@ -1176,7 +1174,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'd60807b98527df86e516532b8e2a62a1cb44c128',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '52efbfdc210624f1895d5994149f53c3a4457f29',
'condition': 'checkout_chromeos',
},
@@ -1194,23 +1192,27 @@ deps = {
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'e0bfd3d75917adfa22e401805f9f9793cec82559',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'ffb88930c81ef7f7026a2433c424d8b3658580d4',
'condition': 'checkout_linux',
},
'src/third_party/crossbench':
Var('chromium_git') + '/crossbench.git' + '@' + Var('crossbench_revision'),
'src/third_party/crubit/src': {
'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'),
'condition': 'checkout_crubit',
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'c023a6302fc665bae743a5833dea350fd3d0749f',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a6898e71abf374d699ebaa121312e89bad8f100a',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
'src/third_party/devtools-frontend-internal': {
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '4c3517346586ea020e5859cf51488e534a90d15e',
'url': 'https://chrome-internal.googlesource.com/devtools/devtools-internal.git' + '@' + '2ac32de43d557d678de46fb7cfc850187f3379fd',
'condition': 'checkout_src_internal',
},
@@ -1218,7 +1220,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '6156797016164b87b3e360e02d0e4107f7f66fbc',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '3460f3558e7b469efb8a225894e21929c8c77629',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '045f146fca682a836e01cd265171312bfb300e06',
@@ -1246,11 +1248,11 @@ deps = {
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + '689da3a7ed50af7448c3f1961d1791c7c1d9c85c',
'src/third_party/flatbuffers/src':
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'b47ba1d5ffae3bd4d5eaad615e33d7cc5c1e3d4a',
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'a56f9ec50e908362e20254fcef28e62a2f148d91',
# Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': {
'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '452be8125f0e2a18a7dfef469e05d19374d36307',
'url': Var('chromium_git') + '/external/fontconfig.git' + '@' + '06929a556fdc39c8fe12965b69070c8df520a33e',
'condition': 'checkout_linux',
},
@@ -1370,7 +1372,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '4e100720a20a471ca5ceff3b15a87596b694ada4',
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'faf22e66ceafad90f5cafbd6707055c24646adcc',
'src/third_party/icu4j': {
'packages': [
@@ -1433,11 +1435,22 @@ deps = {
'condition': 'checkout_android',
},
'src/third_party/kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/kotlin_stdlib',
'version': 'Mg7371mEUwDQH4_z29HdWqYWVlXN6t2dXX0kIutg_SwC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/kotlinc/current': {
'packages': [
{
'package': 'chromium/third_party/kotlinc',
'version': 'F-v9Yy4tNQtjGB7TtAWc2J-3qhx9Q6ixZJyuGixVH08C',
'version': 'bCZedwoM-hb1pP1QKzA3P5aR4zjZltqLj4JQpmQsHuUC',
},
],
'condition': 'checkout_android',
@@ -1450,11 +1463,14 @@ deps = {
'src/third_party/libFuzzer/src':
Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
'src/third_party/centipede/src':
Var('chromium_git') + '/external/github.com/google/centipede.git' + '@' + Var('centipede_revision'),
'src/third_party/libaddressinput/src':
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'e8712e415627f22d0b00ebee8db99547077f39bd',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '74d61ae86f20bc9fb707347bfe618425024f3865',
Var('aomedia_git') + '/aom.git' + '@' + '70b12695e1967d9589dd15b345a039e575e8f429',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1487,7 +1503,7 @@ deps = {
},
'src/third_party/libjpeg_turbo':
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + 'ed683925e4897a84b3bffc5c1414c85b97a129a3',
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '0b6e6a152242c2fa30ffb29633d1b7d7b229b46b',
'src/third_party/liblouis/src': {
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
@@ -1515,16 +1531,16 @@ deps = {
},
'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'b7c22b3a9584d7d9c0a7b9b37a52bc595113b398',
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'db69ce6aea278bee88668fd9cc2af2e544516fdb',
'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
'src/third_party/libwebp/src':
Var('chromium_git') + '/webm/libwebp.git' + '@' + '603e8d7adb0ccc35237419c2938194623b60e9be',
Var('chromium_git') + '/webm/libwebp.git' + '@' + 'fd7b5d48464475408d32d2611bdb6947d4246b97',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '6e4b0acb4b3d5858c77a044aad46132998ac4a76',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '2bdc210be9eb11ded16bf3ef1f6cadb0d4dcb0c2',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1651,7 +1667,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'db956674bbdfbaab5acdd3fdb4117c2fef5527e9',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + 'eca304d29cee3f9d045fd0dd36f147a91a367c75',
Var('chromium_git') + '/openscreen' + '@' + '5d694418bc76f66463f06ce141c375062b0ba3b0',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1668,7 +1684,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + '3aa2acd9af48d097ad94cf778c2228031e6c4dfa',
Var('android_git') + '/platform/external/perfetto.git' + '@' + '4bda78645d1d23a98473b793bc532a3ebff6c7f9',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1702,13 +1718,13 @@ deps = {
},
'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '8afcf7fcc481692197e33612446d69e8f5777c54',
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '3a8436ac436124a57a4e22d5c8713a2d42b381d7',
'src/third_party/r8': {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'lhnuNLpWpWBVM6efX0iIg5i9ztfW8VKpMvkyfWCxfr0C',
'version': 'HGbnG0_a1HHQtwgKBlFRLuC0-AVyYhHpcTol560MvlUC',
},
],
'condition': 'checkout_android',
@@ -1722,7 +1738,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'qGtBu6TtxyR5XNy4cmsslb7c946YtkZF5_QCjVP-wc8C',
'version': 'PwglNZFRNPkBBXdnY9NfrZFk2ULWDTRxhV9rl2kvkpUC',
},
],
'condition': 'checkout_android',
@@ -1746,7 +1762,7 @@ deps = {
},
'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '3168a5c8f4c447fd8cea94078121ee2e2cd87df0',
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '21a85fef159f9942f636a43b14c64b481c2a05b2',
'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@@ -1758,7 +1774,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '469aae8118e18b7354607f8ef09780cf8f3e54aa',
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '88742a54683bcdec9a0d0c14462621da8b6f841e',
'src/third_party/sqlite4java': {
'packages': [
@@ -1800,20 +1816,20 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ef70dc999eee784e3f505e89c798f8b9cc894e52',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '60ec371de65a63d588bcfce7a99482847ad1312e',
'src/third_party/turbine': {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 't0TeGgk2CZr3B0HtEqBb60BSRwVPRJm9066izhJwzz0C',
'version': 'YQC-btuan_DTe9V9dv_e1LxgYSWeOoDfrd-VSqzIvHkC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@243deb3abd84f442957dc5394745d25482ff791b',
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@3c1556cc73226c2895c1de9a925dc5fe623c8752',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@@ -1850,10 +1866,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'd1b65aa5a88f6efd900604dfcda840154e9f16e2',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'a7e54e7b964d08901cba6418ca00ffec501bc867',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '6c8361e98f1daba65902f5e2fc1297893ac14b67',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'b6b9b1fc761c039195faa033cb8fdde4ed8ba0a9',
Var('webrtc_git') + '/src.git' + '@' + 'd75b9e9ff07ee42841b4e416629c9fbd4b058905',
# Wuffs' canonical repository is at github.com/google/wuffs, but we use
# Skia's mirror of Wuffs, the same as in upstream Skia's DEPS file.
@@ -1871,7 +1887,7 @@ deps = {
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a50369c0fdd15f0f35b1a91c964644327a88d480',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '659147817805d17c7be2d60bd7bbca7e780f9c82',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1880,7 +1896,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'PZOpm-VdLUuaVE8seo910YRCnqv7Y2BkPcrmUs13RMAC',
'version': '-G9gUusEGDPsbf_GULdyJo9DYyeNBuqD8gHfdxCvIbYC',
},
],
'dep_type': 'cipd',
@@ -1890,7 +1906,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': 'qp3u_bn43vFlG3HHG61Co9eOeo52m6SWIw099mHqt9EC',
'version': 'BZ0EL-KSkwCzJciJf9MbwmZAJPRhlKOp0LEYiTV6lWIC',
},
],
'dep_type': 'cipd',
@@ -1901,7 +1917,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': 'aZ8KYJUPYrRq4f7-Td3nt0et_16S06A0vovOn2c85tIC',
'version': '0GVvuvDBNt6KJ7UzxBRUW5ShTWCliifyzaCkudNzmrkC',
},
],
'dep_type': 'cipd',
@@ -1912,7 +1928,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'JtcfJFsvsUuaaIajqvwETn8j5hxOSC_YLDszV96Ukn8C',
'version': '8vKG1ZGA0f7asv5AHh_7yBxVD2h-I-yR2oY4TOjwo6kC',
},
],
'dep_type': 'cipd',
@@ -1923,7 +1939,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@f1d52b8c1ec0769ac006917d1fe42e99a4dba6c3',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@67c8cac0a84ad86b64ecf3f4af23a928fb605313',
'condition': 'checkout_src_internal',
},
@@ -1942,7 +1958,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'Y9Vb3-WAI0IRjTRTVDtPP86MNNpZItvfey3JuYZXXeYC',
'version': 'WyNqAPOj-HR5fZBbkHIXf7YeyCvf0GpXuhdv6EqzNJsC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1953,7 +1969,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': 'J19Uq07iO__IsduQFotOfHNdiRWoyIQc4UgK1HpMFU8C',
'version': 'hF_ZkOgJWb6Tl-9h6WAmpF4VcZggBH4rjoP_hBr2ddUC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1964,7 +1980,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'CHpgn1-7IChFiK96I1-giMbXe-Cl9XQiwH3aHwCGzYwC',
'version': 'EXosTZG9iiyjnqmWKjS04Tf9dvSUjbHqqhGv1SQW0ycC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1975,7 +1991,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': 'ufJ9DwqTBE76l81FUQQ2JOIG1ely5QRDFuwz3ccJIRIC',
'version': 'zmInwk2DIsJlzZbF9Fw29hmN6rQTpzqIgzzMAgwl2PkC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1994,7 +2010,7 @@ deps = {
},
'src/third_party/android_prebuilts/build_tools': {
'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '5794e96eb8bae47bb48feee915d99583573b3887',
'url': Var('android_git') + '/platform/prebuilts/build-tools.git' + '@' + '673c20b524a83b662d8c1057fd3eec8fd0f93f9d',
'condition': 'checkout_android_prebuilts_build_tools',
},
@@ -2569,7 +2585,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement',
'version': 'version:2@18.0.1.cr1',
'version': 'version:2@18.1.0.cr1',
},
],
'condition': 'checkout_android',
@@ -2723,7 +2739,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks',
'version': 'version:2@18.0.1.cr1',
'version': 'version:2@18.0.2.cr1',
},
],
'condition': 'checkout_android',
@@ -2763,11 +2779,22 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_play_core': {
'src/third_party/android_deps/libs/com_google_android_play_core_common': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core',
'version': 'version:2@1.10.0.cr1',
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core_common',
'version': 'version:2@2.0.2.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_play_feature_delivery': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_play_feature_delivery',
'version': 'version:2@2.0.1.cr1',
},
],
'condition': 'checkout_android',
@@ -3328,7 +3355,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy',
'version': 'version:2@1.12.13.cr1',
'version': 'version:2@1.12.22.cr1',
},
],
'condition': 'checkout_android',
@@ -3339,7 +3366,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/net_bytebuddy_byte_buddy_agent',
'version': 'version:2@1.12.13.cr1',
'version': 'version:2@1.12.22.cr1',
},
],
'condition': 'checkout_android',
@@ -3478,39 +3505,6 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_annotations': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_annotations',
'version': 'version:2@13.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.8.0.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': {
'packages': [
{
@@ -3581,7 +3575,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_mockito_mockito_core',
'version': 'version:2@4.7.0.cr1',
'version': 'version:2@5.1.1.cr1',
},
],
'condition': 'checkout_android',
@@ -3592,7 +3586,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_objenesis_objenesis',
'version': 'version:2@3.2.cr1',
'version': 'version:2@3.3.cr1',
},
],
'condition': 'checkout_android',
@@ -3876,16 +3870,26 @@ include_rules = [
'+third_party/icu/source/i18n/unicode',
'+url',
# Abseil features are allowlisted explicitly. See
# Abseil is allowed by default, but some features are banned. See
# //styleguide/c++/c++-features.md.
'-absl',
'-third_party/abseil-cpp',
'+third_party/abseil-cpp/absl/base/attributes.h',
'+third_party/abseil-cpp/absl/cleanup/cleanup.h',
'+third_party/abseil-cpp/absl/numeric/int128.h',
'+third_party/abseil-cpp/absl/types/optional.h',
'+third_party/abseil-cpp/absl/types/variant.h',
'+third_party/abseil-cpp/absl/utility/utility.h',
'+third_party/abseil-cpp',
'-third_party/abseil-cpp/absl/algorithm/container.h',
'-third_party/abseil-cpp/absl/container',
'-third_party/abseil-cpp/absl/crc',
'-third_party/abseil-cpp/absl/flags',
'-third_party/abseil-cpp/absl/functional/any_invocable.h',
'-third_party/abseil-cpp/absl/functional/bind_front.h',
'-third_party/abseil-cpp/absl/functional/function_ref.h',
'-third_party/abseil-cpp/absl/hash',
'-third_party/abseil-cpp/absl/log',
'-third_party/abseil-cpp/absl/random',
'-third_party/abseil-cpp/absl/status/statusor.h',
'-third_party/abseil-cpp/absl/strings',
'+third_party/abseil-cpp/absl/strings/cord.h',
'-third_party/abseil-cpp/absl/synchronization',
'-third_party/abseil-cpp/absl/time',
'-third_party/abseil-cpp/absl/types/any.h',
'-third_party/abseil-cpp/absl/types/span.h',
]
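
Worth noting: these rules flip Abseil from an explicit allowlist to allow-by-default with targeted bans, and a single file (absl/strings/cord.h) is re-allowed underneath a banned directory. A small illustration of what checkdeps would now accept or reject under the rules above (the includes themselves are hypothetical examples):

// Allowed: covered by the new default '+third_party/abseil-cpp' rule.
#include "third_party/abseil-cpp/absl/types/optional.h"

// Allowed: explicit exception carved out of the banned strings directory.
#include "third_party/abseil-cpp/absl/strings/cord.h"

// Rejected: '-third_party/abseil-cpp/absl/strings' bans the rest.
// #include "third_party/abseil-cpp/absl/strings/str_cat.h"
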
@@ -4353,30 +4357,6 @@ hooks = [
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-focal.tgz.sha1',
],
},
{
'name': 'msan_chained_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1',
],
},
{
'name': 'msan_no_origins_xenial',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'src/third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1',
],
},
{
'name': 'wasm_fuzzer',
'pattern': '.',
@@ -4572,7 +4552,7 @@ hooks = [
{
'name': 'Fetch Android AFDO profile',
'pattern': '.',
'condition': 'checkout_android or checkout_linux',
'condition': 'checkout_android',
'action': [ 'python3',
'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/newest.txt',
@@ -4581,6 +4561,18 @@ hooks = [
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
],
},
{
'name': 'Fetch Android Arm AFDO profile',
'pattern': '.',
'condition': 'checkout_android',
'action': [ 'python3',
'src/tools/download_optimization_profile.py',
'--newest_state=src/chrome/android/profiles/arm.newest.txt',
'--local_state=src/chrome/android/profiles/arm.local.txt',
'--output_name=src/chrome/android/profiles/arm.afdo.prof',
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
],
},
{
'name': 'gvr_static_shim_android',
'pattern': '\\.sha1',

File diff suppressed because it is too large.

@@ -207,7 +207,7 @@ AllocatorDispatch g_allocator_dispatch = {&AllocFn,
} // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
#if BUILDFLAG(USE_PARTITION_ALLOC)
namespace base::allocator::dispatcher::partition_allocator_details {
namespace {
@@ -222,7 +222,7 @@ void PartitionFreeHook(void* address) {
} // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
#endif // BUILDFLAG(USE_PARTITION_ALLOC)
namespace base::allocator::dispatcher {
@@ -236,11 +236,11 @@ void InstallStandardAllocatorHooks() {
// happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
#if BUILDFLAG(USE_PARTITION_ALLOC)
partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
#endif // BUILDFLAG(USE_PARTITION_ALLOC)
}
} // namespace base::allocator::dispatcher
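
For orientation, the observer hooks whose gating changes here are plain functions handed to PartitionAllocHooks::SetObserverHooks. A minimal sketch of that shape, with hypothetical names and bodies (the exact hook signatures are an assumption inferred from the calls visible in this hunk):

#include <cstddef>

namespace {

// Hypothetical allocation observer. It must not allocate itself, or it
// would re-enter the dispatcher (see the ReentryGuard changes below).
void MyPartitionAllocHook(void* address, size_t size, const char* type_name) {
  // e.g. record a heap-profiling sample for this allocation.
}

// Hypothetical free observer, mirroring the bookkeeping on free.
void MyPartitionFreeHook(void* address) {}

}  // namespace

// Installed once, in the same way as the hunk above:
// partition_alloc::PartitionAllocHooks::SetObserverHooks(
//     &MyPartitionAllocHook, &MyPartitionFreeHook);
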


@@ -6,6 +6,8 @@
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
@@ -15,15 +17,25 @@
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
pthread_key_t ReentryGuard::entered_key_ = 0;
// pthread_key_t has different signedness on Mac and Android. Store the null
// value in a strongly-typed constant to avoid "comparison of integers of
// different signs" warnings when comparing with 0.
constexpr pthread_key_t kNullKey = 0;
pthread_key_t ReentryGuard::entered_key_ = kNullKey;
void ReentryGuard::InitTLSSlot() {
if (entered_key_ == 0) {
if (entered_key_ == kNullKey) {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
// Touch the TLS slot immediately to force any allocations.
// TODO(https://crbug.com/1411454): Use this technique to avoid allocations
// in PoissonAllocationSampler::ScopedMuteThreadSamples, which will make
// ReentryGuard redundant.
pthread_setspecific(entered_key_, nullptr);
}
DCHECK(entered_key_ != 0);
DCHECK_NE(entered_key_, kNullKey);
}
#else
@@ -31,4 +43,19 @@ void ReentryGuard::InitTLSSlot() {
void ReentryGuard::InitTLSSlot() {}
#endif
void ReentryGuard::RecordTLSSlotToCrashKey() {
// Record the key in crash dumps to detect when it's higher than 32
// (PTHREAD_KEY_2NDLEVEL_SIZE).
// TODO(crbug.com/1411454): Remove this after diagnosing reentry crashes.
static auto* const crash_key = base::debug::AllocateCrashKeyString(
"reentry_guard_tls_slot", base::debug::CrashKeySize::Size32);
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
base::debug::SetCrashKeyString(crash_key, base::NumberToString(entered_key_));
#else
base::debug::SetCrashKeyString(crash_key, "unused");
#endif
}
} // namespace base::allocator::dispatcher


@@ -23,8 +23,10 @@ namespace base::allocator::dispatcher {
// twice. The scoped guard allows us to detect that.
//
// Besides that the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local variables.
// Make use of pthread TLS instead of C++ thread_local there.
// seem to allocate memory lazily on the first access to thread_local variables
// (and on Android at least thread_local is implemented on top of pthread so is
// strictly worse for performance). Make use of pthread TLS instead of C++
// thread_local there.
struct BASE_EXPORT ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
@@ -37,12 +39,19 @@
explicit operator bool() const noexcept { return allowed_; }
// This function must be called in very early of the process start-up in
// order to acquire a low TLS slot number because glibc TLS implementation
// will require a malloc call to allocate storage for a higher slot number
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot.
// This function must be called before installing any allocator hooks because
// some TLS implementations may allocate (eg. glibc will require a malloc call
// to allocate storage for a higher slot number (>= PTHREAD_KEY_2NDLEVEL_SIZE
// == 32). This touches the thread-local storage so that any malloc happens
// before installing the hooks.
static void InitTLSSlot();
// InitTLSSlot() is called before crash keys are available. At some point
// after SetCrashKeyImplementation() is called, this function should be
// called to record `entered_key_` to a crash key for debugging. This may
// allocate so it must not be called from inside an allocator hook.
static void RecordTLSSlotToCrashKey();
private:
static pthread_key_t entered_key_;
const bool allowed_;
@@ -56,6 +65,7 @@ struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; }
static void InitTLSSlot();
static void RecordTLSSlotToCrashKey();
};
#endif
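
The comments above encode three invariants worth restating: the guard itself must not allocate, the TLS slot must be created and touched before any allocator hook is installed, and recording the slot to a crash key may allocate, so it must happen outside the hooks. A self-contained sketch of the underlying pthread-TLS guard pattern (hypothetical names, simplified from the diff; not the actual Chromium class):

#include <pthread.h>
#include <cassert>

// Minimal reentry guard backed by pthread TLS rather than C++ thread_local.
class SimpleReentryGuard {
 public:
  // Call very early, before any allocator hook is installed: creating the
  // key early yields a low slot number (glibc lazily mallocs storage for
  // slots >= PTHREAD_KEY_2NDLEVEL_SIZE == 32), and touching the slot right
  // away forces any lazy setup to happen now rather than inside a hook.
  static void InitTLSSlot() {
    if (key_ == 0) {
      const int error = pthread_key_create(&key_, nullptr);
      assert(!error);
      pthread_setspecific(key_, nullptr);  // Pre-touch the slot.
    }
  }

  // Entering the guarded region sets a per-thread flag; a nested entry on
  // the same thread observes the flag and reports "not allowed".
  SimpleReentryGuard() : allowed_(!pthread_getspecific(key_)) {
    pthread_setspecific(key_, reinterpret_cast<void*>(1));
  }

  // Only the outermost guard clears the flag on exit.
  ~SimpleReentryGuard() {
    if (allowed_)
      pthread_setspecific(key_, nullptr);
  }

  explicit operator bool() const { return allowed_; }

 private:
  static pthread_key_t key_;
  const bool allowed_;
};

pthread_key_t SimpleReentryGuard::key_ = 0;
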


@@ -32,10 +32,16 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr",
FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif
);
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"},
{DanglingPtrMode::kLogOnly, "log_only"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
@@ -43,14 +49,24 @@ const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
DanglingPtrMode::kCrash,
&kDanglingPtrModeOption,
};
constexpr FeatureParam<DanglingPtrType>::Option kDanglingPtrTypeOption[] = {
{DanglingPtrType::kAll, "all"},
{DanglingPtrType::kCrossTask, "cross_task"},
};
const base::FeatureParam<DanglingPtrType> kDanglingPtrTypeParam{
&kPartitionAllocDanglingPtr,
"type",
DanglingPtrType::kAll,
&kDanglingPtrTypeOption,
};
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
BASE_FEATURE(kPartitionAllocPCScan,
"PartitionAllocPCScan",
FEATURE_DISABLED_BY_DEFAULT);
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// If enabled, PCScan is turned on only for the browser's malloc partition.
@@ -89,6 +105,7 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT
#else
@@ -183,11 +200,11 @@ BASE_FEATURE(kPartitionAllocPCScanEagerClearing,
// In addition to heap, scan also the stack of the current mutator.
BASE_FEATURE(kPartitionAllocPCScanStackScanning,
"PartitionAllocPCScanStackScanning",
#if defined(PA_PCSCAN_STACK_SUPPORTED)
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
#endif // defined(PA_PCSCAN_STACK_SUPPORTED)
#endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
);
BASE_FEATURE(kPartitionAllocDCScan,


@@ -6,7 +6,6 @@
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
@@ -25,10 +24,6 @@ extern const BASE_EXPORT base::FeatureParam<UnretainedDanglingPtrMode>
kUnretainedDanglingPtrModeParam;
// See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDanglingPtr);
enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr.
@@ -36,19 +31,33 @@ enum class DanglingPtrMode {
// Log the signature of every occurrences without crashing. It is used by
// bots.
// Format "[DanglingSignature]\t<1>\t<2>"
// 1. The function who freed the memory while it was still referenced.
// 2. The function who released the raw_ptr reference.
kLogSignature,
// Format "[DanglingSignature]\t<1>\t<2>\t<3>\t<4>"
// 1. The function which freed the memory while it was still referenced.
// 2. The task in which the memory was freed.
// 3. The function which released the raw_ptr reference.
// 4. The task in which the raw_ptr was released.
kLogOnly,
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
enum class DanglingPtrType {
// Act on any dangling raw_ptr released after being freed.
kAll, // (default)
#if PA_CONFIG(ALLOW_PCSCAN)
// Detect when freeing memory and releasing the dangling raw_ptr happens in
// a different task. Those are more likely to cause use after free.
kCrossTask,
// Note: This will be extended with LongLived
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrType>
kDanglingPtrTypeParam;
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScan);
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanBrowserOnly);
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocPCScanRendererOnly);
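
For reference, call sites read these params through the usual base::FeatureParam accessors; a minimal hypothetical reader of the new DanglingPtrType knob (the switch body is illustrative, not part of this CL):

#include "base/allocator/partition_alloc_features.h"
#include "base/feature_list.h"

// Hypothetical call site; assumes the declarations shown in the diff above.
void ActOnDanglingPtrConfig() {
  if (!base::FeatureList::IsEnabled(
          base::features::kPartitionAllocDanglingPtr)) {
    return;
  }
  switch (base::features::kDanglingPtrTypeParam.Get()) {
    case base::features::DanglingPtrType::kAll:
      // Act on every dangling raw_ptr released after the memory was freed.
      break;
    case base::features::DanglingPtrType::kCrossTask:
      // Act only when the free and the release happen in different tasks.
      break;
  }
}
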


@@ -48,14 +48,14 @@
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/allocator/partition_allocator/starscan/stats_reporter.h"
#include "base/memory/nonscannable_memory.h"
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h"
@@ -74,13 +74,13 @@ namespace {
namespace switches {
[[maybe_unused]] constexpr char kRendererProcess[] = "renderer";
constexpr char kZygoteProcess[] = "zygote";
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
constexpr char kGpuProcess[] = "gpu-process";
constexpr char kUtilityProcess[] = "utility";
#endif
} // namespace switches
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
@@ -181,11 +181,11 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
static constexpr char kTraceCategory[] = "partition_alloc";
};
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
} // namespace
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
void RegisterPCScanStatsReporter() {
static StatsReporterImpl s_reporter;
static bool registered = false;
@@ -195,7 +195,7 @@ void RegisterPCScanStatsReporter() {
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
namespace {
@@ -302,7 +302,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
}
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
[[maybe_unused]] bool pcscan_enabled =
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
FeatureList::IsEnabled(features::kPartitionAllocPCScanBrowserOnly);
#else
false;
@@ -378,7 +378,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
// fully controlled by Finch and thus have identical population sizes.
std::string pcscan_group_name = "Unavailable";
std::string pcscan_group_name_fallback = "Unavailable";
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
if (brp_truly_enabled) {
// If BRP protection is enabled, just ignore the population. Check
// brp_truly_enabled, not brp_finch_enabled, because there are certain modes
@@ -395,7 +395,7 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
} else {
pcscan_group_name_fallback = (pcscan_enabled ? "Enabled" : "Disabled");
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
trials.emplace("PCScan_Effective", pcscan_group_name);
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@@ -415,11 +415,13 @@ namespace {
internal::PartitionLock g_stack_trace_buffer_lock;
struct StackTraceWithID {
struct DanglingPointerFreeInfo {
debug::StackTrace stack_trace;
debug::TaskTrace task_trace;
uintptr_t id = 0;
};
using DanglingRawPtrBuffer = std::array<absl::optional<StackTraceWithID>, 32>;
using DanglingRawPtrBuffer =
std::array<absl::optional<DanglingPointerFreeInfo>, 32>;
DanglingRawPtrBuffer g_stack_trace_buffer GUARDED_BY(g_stack_trace_buffer_lock);
void DanglingRawPtrDetected(uintptr_t id) {
@@ -428,14 +430,14 @@ void DanglingRawPtrDetected(uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
#if DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
PA_DCHECK(!entry || entry->id != id);
}
#endif // DCHECK_IS_ON()
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
if (!entry) {
entry = {debug::StackTrace(), id};
entry = {debug::StackTrace(), debug::TaskTrace(), id};
return;
}
}
@@ -444,15 +446,16 @@ void DanglingRawPtrDetected(uintptr_t id) {
// enough.
}
// From the StackTrace recorded in |DanglingRawPtrDetected|, extract the one
// From the traces recorded in |DanglingRawPtrDetected|, extract the one
// whose id match |id|. Return nullopt if not found.
absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
absl::optional<DanglingPointerFreeInfo> TakeDanglingPointerFreeInfo(
uintptr_t id) {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
for (absl::optional<StackTraceWithID>& entry : g_stack_trace_buffer) {
for (absl::optional<DanglingPointerFreeInfo>& entry : g_stack_trace_buffer) {
if (entry && entry->id == id) {
debug::StackTrace stack_trace = std::move(entry->stack_trace);
absl::optional<DanglingPointerFreeInfo> result(entry);
entry = absl::nullopt;
return stack_trace;
return result;
}
}
return absl::nullopt;
@@ -463,14 +466,31 @@ absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
// are all the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
std::vector<StringPiece> lines = SplitStringPiece(
stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
stacktrace, "\r\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
// We are looking for the callers of the function releasing the raw_ptr and
// freeing memory:
const StringPiece callees[] = {
// Common signatures
"internal::PartitionFree",
"base::(anonymous namespace)::FreeFn",
// Linux signatures
"internal::RawPtrBackupRefImpl<>::ReleaseInternal()",
"internal::PartitionFree()",
"base::(anonymous namespace)::FreeFn()",
"base::RefCountedThreadSafe<>::Release()",
// Windows signatures
"internal::RawPtrBackupRefImpl<0>::ReleaseInternal",
"_free_base",
// Windows stack traces are prefixed with "Backtrace:"
"Backtrace:",
// Mac signatures
"internal::RawPtrBackupRefImpl<false>::ReleaseInternal",
// Task traces are prefixed with "Task trace:" in
// |TaskTrace::OutputToStream|
"Task trace:",
};
size_t caller_index = 0;
for (size_t i = 0; i < lines.size(); ++i) {
@ -481,79 +501,143 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
}
}
if (caller_index >= lines.size()) {
return "undefined";
return "no_callee_match";
}
StringPiece caller = lines[caller_index];
// |callers| follows the following format:
//
// #4 0x56051fe3404b content::GeneratedCodeCache::DidCreateBackend()
// -- -------------- -----------------------------------------------
// Depth Address Function
if (caller.empty()) {
return "invalid_format";
}
size_t address_start = caller.find(' ');
size_t function_start = caller.find(' ', address_start + 1);
// On Posix platforms |callers| follows the following format:
//
// #<index> <address> <symbol>
//
// See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] == '#') {
const size_t address_start = caller.find(' ');
const size_t function_start = caller.find(' ', address_start + 1);
if (address_start == caller.npos || function_start == caller.npos) {
return "undefined";
return "invalid_format";
}
return std::string(caller.substr(function_start + 1));
}
void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\t%s",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str(),
ExtractDanglingPtrSignature(stack_trace_free->ToString()).c_str());
} else {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\tmissing-stacktrace",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
// On Windows platforms |callers| follows the following format:
//
// \t<symbol> [0x<address>]+<displacement>(<filename>:<line>)
//
// See https://crsrc.org/c/base/debug/stack_trace_win.cc
if (caller[0] == '\t') {
const size_t symbol_start = 1;
const size_t symbol_end = caller.find(' ');
if (symbol_end == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
}
void DanglingRawPtrReleasedCrash(uintptr_t id) {
// On Mac platforms |callers| follows the following format:
//
// <index> <library> 0x<address> <symbol> + <line>
//
// See https://crsrc.org/c/base/debug/stack_trace_posix.cc
if (caller[0] >= '0' && caller[0] <= '9') {
const size_t address_start = caller.find("0x");
const size_t symbol_start = caller.find(' ', address_start + 1) + 1;
const size_t symbol_end = caller.find(' ', symbol_start);
if (symbol_start == caller.npos || symbol_end == caller.npos) {
return "invalid_format";
}
return std::string(caller.substr(symbol_start, symbol_end - symbol_start));
}
return "invalid_format";
}
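
The parser above dispatches on the first character of each frame to pick a per-platform format. A minimal sketch of the expected behavior, exercised through the test-only `ExtractDanglingPtrSignatureForTests` wrapper introduced later in this change; the frame addresses and symbols are hypothetical but follow the Posix format documented above:

```cpp
#include <string>

#include "base/allocator/partition_alloc_support.h"

// Frame #0 matches the "internal::PartitionFree()" callee, so the
// extracted signature is the symbol of the next frame, i.e. the caller
// that freed the memory.
std::string DemonstratePosixExtraction() {
  const std::string trace =
      "#0 0x56051fe3404b partition_alloc::internal::PartitionFree()\n"
      "#1 0x56051fe3512c content::GeneratedCodeCache::DidCreateBackend()";
  // Returns "content::GeneratedCodeCache::DidCreateBackend()".
  return base::allocator::PartitionAllocSupport::
      ExtractDanglingPtrSignatureForTests(trace);
}
```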
std::string ExtractDanglingPtrSignature(debug::TaskTrace task_trace) {
if (task_trace.empty()) {
return "No active task";
}
return ExtractDanglingPtrSignature(task_trace.ToString());
}
std::string ExtractDanglingPtrSignature(
absl::optional<DanglingPointerFreeInfo> free_info,
debug::StackTrace release_stack_trace,
debug::TaskTrace release_task_trace) {
if (free_info) {
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s",
ExtractDanglingPtrSignature(free_info->stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(free_info->task_trace).c_str(),
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
return StringPrintf(
"[DanglingSignature]\t%s\t%s\t%s\t%s", "missing", "missing",
ExtractDanglingPtrSignature(release_stack_trace.ToString()).c_str(),
ExtractDanglingPtrSignature(release_task_trace).c_str());
}
template <features::DanglingPtrMode dangling_pointer_mode,
features::DanglingPtrType dangling_pointer_type>
void DanglingRawPtrReleased(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
debug::TaskTrace task_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
absl::optional<DanglingPointerFreeInfo> free_info =
TakeDanglingPointerFreeInfo(id);
if constexpr (dangling_pointer_type ==
features::DanglingPtrType::kCrossTask) {
if (!free_info) {
return;
}
if (task_trace_release.ToString() == free_info->task_trace.ToString()) {
return;
}
}
std::string dangling_signature = ExtractDanglingPtrSignature(
free_info, stack_trace_release, task_trace_release);
static const char dangling_ptr_footer[] =
"\n"
"\n"
"Please check for more information on:\n"
"https://chromium.googlesource.com/chromium/src/+/main/docs/"
"dangling_ptr_guide.md\n";
if (stack_trace_free) {
"dangling_ptr_guide.md\n"
"\n"
"Googlers: Please give us your feedback about the dangling pointer\n"
" detector at:\n"
" http://go/dangling-ptr-cq-survey\n";
if (free_info) {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n"
<< dangling_signature << "\n\n"
<< "The memory was freed at:\n"
<< *stack_trace_free << "\n"
<< free_info->stack_trace << free_info->task_trace << "\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
} else {
LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n"
<< stack_trace_release << task_trace_release
<< dangling_ptr_footer;
}
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
ImmediateCrash();
}
}
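
For reference, the `[DanglingSignature]` line built above is tab-separated with four columns after the tag: free stack, free task, release stack, release task (or `missing` placeholders). A minimal sketch of a consumer, e.g. for a log-scraping test; the helper name is hypothetical:

```cpp
#include <sstream>
#include <string>
#include <vector>

// Splits one "[DanglingSignature]\t...\t...\t...\t..." line into its
// columns: index 0 is the tag, indices 1..4 are the four signatures.
std::vector<std::string> SplitDanglingSignature(const std::string& line) {
  std::vector<std::string> columns;
  std::istringstream stream(line);
  std::string column;
  while (std::getline(stream, column, '\t')) {
    columns.push_back(column);
  }
  return columns;
}
```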
void ClearDanglingRawPtrBuffer() {
internal::PartitionAutoLock guard(g_stack_trace_buffer_lock);
@ -573,16 +657,35 @@ void InstallDanglingRawPtrChecks() {
return;
}
partition_alloc::SetDanglingRawPtrDetectedFn(&DanglingRawPtrDetected);
switch (features::kDanglingPtrModeParam.Get()) {
case features::DanglingPtrMode::kCrash:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
break;
case features::DanglingPtrMode::kLogSignature:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
switch (features::kDanglingPtrTypeParam.Get()) {
case features::DanglingPtrType::kAll:
partition_alloc::SetDanglingRawPtrReleasedFn(
DanglingRawPtrReleasedLogSignature);
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kCrash,
features::DanglingPtrType::kCrossTask>);
break;
}
break;
case features::DanglingPtrMode::kLogOnly:
switch (features::kDanglingPtrTypeParam.Get()) {
case features::DanglingPtrType::kAll:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
features::DanglingPtrType::kAll>);
break;
case features::DanglingPtrType::kCrossTask:
partition_alloc::SetDanglingRawPtrReleasedFn(
&DanglingRawPtrReleased<features::DanglingPtrMode::kLogOnly,
features::DanglingPtrType::kCrossTask>);
break;
}
break;
}
}
@ -632,7 +735,7 @@ void InstallUnretainedDanglingRawPtrChecks() {
namespace {
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
void SetProcessNameForPCScan(const std::string& process_type) {
const char* name = [&process_type] {
if (process_type.empty()) {
@ -713,7 +816,7 @@ bool EnablePCScanForMallocPartitionsInRendererProcessIfNeeded() {
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
return false;
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
} // namespace
@ -929,7 +1032,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false;
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
if (!enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process.
@ -963,10 +1066,10 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
SetProcessNameForPCScan(process_type);
}
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
// Non-quarantinable partition is dealing with hot V8's zone allocations.
// In case PCScan is enabled in Renderer, enable thread cache on this
// partition. At the same time, thread cache on the main(malloc) partition
@ -976,7 +1079,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
.root()
->EnableThreadCacheIfSupported();
} else
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
{
allocator_shim::internal::PartitionAllocMalloc::Allocator()
->EnableThreadCacheIfSupported();
@ -1058,7 +1161,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) &&
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocPCScanMUAwareScheduler)) {
// Assign PCScan a task-based scheduling backend.
@ -1070,7 +1173,7 @@ void PartitionAllocSupport::ReconfigureAfterTaskRunnerInit(
partition_alloc::internal::PCScan::scheduler().SetNewSchedulingBackend(
*mu_aware_task_based_backend.get());
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
base::allocator::StartMemoryReclaimer(
@ -1136,4 +1239,11 @@ void PartitionAllocSupport::OnBackgrounded() {
// BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
std::string PartitionAllocSupport::ExtractDanglingPtrSignatureForTests(
std::string stacktrace) {
return ExtractDanglingPtrSignature(stacktrace);
}
#endif
} // namespace base::allocator

View File

@ -19,7 +19,7 @@
namespace base::allocator {
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void RegisterPCScanStatsReporter();
#endif
@ -75,6 +75,11 @@ class BASE_EXPORT PartitionAllocSupport {
void OnForegrounded(bool has_main_frame);
void OnBackgrounded();
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static std::string ExtractDanglingPtrSignatureForTests(
std::string stacktrace);
#endif
static PartitionAllocSupport* Get() {
static auto* singleton = new PartitionAllocSupport();
return singleton;

View File

@ -289,19 +289,20 @@ component("partition_alloc") {
}
if (use_starscan) {
if (current_cpu == "x64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
} else if (current_cpu == "arm") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
assert(pcscan_stack_supported)
sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
} else {
# To support a trampoline for another arch, please refer to v8/src/heap/base.
assert(!pcscan_stack_supported)
}
}
public_deps = [
@ -397,6 +398,12 @@ source_set("raw_ptr") {
# See also: `partition_alloc_base/component_export.h`
defines = [ "IS_RAW_PTR_IMPL" ]
# When built inside Chromium, although this cannot directly be made a
# component, we expect `//base` to provide the only GN-level access.
if (build_with_chromium) {
visibility = [ "//base" ]
}
}
buildflag_header("partition_alloc_buildflags") {
@ -415,12 +422,15 @@ buildflag_header("partition_alloc_buildflags") {
# defines and partition alloc includes the header file. For chrome,
# gen/base/allocator/buildflags.h defines and chrome includes.
flags = [
"HAS_64_BIT_POINTERS=$has_64_bit_pointers",
"USE_PARTITION_ALLOC=$use_partition_alloc",
"USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc",
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
@ -442,9 +452,17 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata",
"USE_STARSCAN=$use_starscan",
"PCSCAN_STACK_SUPPORTED=$pcscan_stack_supported",
"ENABLE_PKEYS=$enable_pkeys",
]
if (is_apple) {
# TODO(crbug.com/1414153): once TimeTicks::Now behavior is unified on iOS,
# this should be removed.
flags += [ "PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS=" +
"$partition_alloc_enable_mach_absolute_time_ticks" ]
}
}
buildflag_header("chromecast_buildflags") {

View File

@ -158,5 +158,9 @@ specific_include_rules = {
"+base",
"+third_party/abseil-cpp/absl/types/optional.h",
"+third_party/abseil-cpp/absl/types/variant.h",
],
"raw_ptr_test_support\.h$": [
"+testing/gmock/include/gmock/gmock.h",
"+third_party/abseil-cpp/absl/types/optional.h",
]
}

View File

@ -34,7 +34,7 @@ AddressPoolManager& AddressPoolManager::GetInstance() {
return singleton_;
}
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace {
@ -77,9 +77,10 @@ uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i)
for (size_t i = 0; i < std::size(aligned_pools_.pools_); ++i) {
aligned_pools_.pools_[i].Reset();
}
}
void AddressPoolManager::Remove(pool_handle handle) {
Pool* pool = GetPool(handle);
@ -102,7 +103,7 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
uintptr_t address,
size_t length) {
PA_DCHECK(0 < handle && handle <= kNumPools);
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
DecommitPages(address, length);
@ -299,7 +300,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true;
}
#else // PA_CONFIG(HAS_64_BITS_POINTERS)
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert(
kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
@ -531,7 +532,7 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
return true;
}
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
AddressSpaceStats stats{};

View File

@ -15,8 +15,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
@ -53,7 +53,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
AddressPoolManager(const AddressPoolManager&) = delete;
AddressPoolManager& operator=(const AddressPoolManager&) = delete;
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
void Add(pool_handle handle, uintptr_t address, size_t length);
void Remove(pool_handle handle);
@ -63,7 +63,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// Return the base address of a pool.
uintptr_t GetPoolBaseAddress(pool_handle handle);
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// Reserves address space from the pool.
uintptr_t Reserve(pool_handle handle,
@ -76,7 +76,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
size_t length);
void ResetForTesting();
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
void MarkUsed(pool_handle handle, uintptr_t address, size_t size);
void MarkUnused(pool_handle handle, uintptr_t address, size_t size);
@ -87,7 +87,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
static bool IsManagedByBRPPool(uintptr_t address) {
return AddressPoolManagerBitmap::IsManagedByBRPPool(address);
}
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
void DumpStats(AddressSpaceStatsDumper* dumper);
@ -107,7 +107,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
// if PartitionAlloc is wholly unused in this process.)
bool GetStats(AddressSpaceStats* stats);
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
class Pool {
public:
constexpr Pool() = default;
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
};
PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
return &aligned_pools_.pools_[handle - 1];
}
@ -168,7 +168,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {};
} aligned_pools_ PA_PKEY_ALIGN;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
static PA_CONSTINIT AddressPoolManager singleton_;
};

View File

@ -7,7 +7,7 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc::internal {
@ -34,4 +34,4 @@ std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_;
} // namespace partition_alloc::internal
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)

View File

@ -14,12 +14,11 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc {
@ -185,6 +184,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_

View File

@ -7,7 +7,7 @@
namespace partition_alloc::internal {
using pool_handle = unsigned;
enum pool_handle : unsigned;
} // namespace partition_alloc::internal

View File

@ -4,8 +4,8 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/random.h"
#include "build/build_config.h"
@ -18,7 +18,7 @@ namespace partition_alloc {
uintptr_t GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(internal::RandomValue());
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
random <<= 32ULL;
random |= static_cast<uintptr_t>(internal::RandomValue());
@ -26,7 +26,7 @@ uintptr_t GetRandomPageBase() {
// OS and build configuration.
random &= internal::ASLRMask();
random += internal::ASLROffset();
#else // PA_CONFIG(HAS_64_BITS_POINTERS)
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_WIN)
// On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the
@ -40,7 +40,7 @@ uintptr_t GetRandomPageBase() {
#endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask();
random += internal::ASLROffset();
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask()));
return random;

View File

@ -9,7 +9,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
namespace partition_alloc {
@ -20,9 +19,9 @@ struct PoolStats {
// On 32-bit, pools are mainly logical entities, intermingled with
// allocations not managed by PartitionAlloc. The "largest available
// reservation" is not possible to measure in that case.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
size_t largest_available_reservation = 0;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif
};
struct AddressSpaceStats {
@ -30,14 +29,14 @@ struct AddressSpaceStats {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PoolStats brp_pool_stats;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
PoolStats configurable_pool_stats;
#else
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
size_t blocklist_size; // measured in super pages
size_t blocklist_hit_count;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS)
PoolStats pkey_pool_stats;
#endif

View File

@ -14,6 +14,6 @@ use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false

View File

@ -6,20 +6,9 @@ standalone repository for PartitionAlloc is hosted
## GN Args
External clients mainly need to set these six GN args:
``` none
# These are blocked on PA-E and `raw_ptr.h` and can never be true until
# we make them part of the standalone PA distribution.
use_partition_alloc_as_malloc_default = false
enable_mte_checked_ptr_support_default = false
enable_backup_ref_ptr_support_default = false
put_ref_count_in_previous_slot_default = false
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
```
PartitionAlloc's build will expect them at
External clients should examine the args described in
`build_overrides/partition_alloc.gni` and add them in their own source
tree. PartitionAlloc's build will expect them at
`//build_overrides/partition_alloc.gni`.
In addition, something must provide `build_with_chromium = false` to

View File

@ -136,8 +136,18 @@ bool UseMapJit() {
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue;
}
#endif // BUILDFLAG(IS_MAC)
#elif BUILDFLAG(IS_IOS)
bool UseMapJit() {
// Always enable MAP_JIT in simulator as it is supported unconditionally.
#if TARGET_IPHONE_SIMULATOR
return true;
#else
// TODO(https://crbug.com/1413818): Fill this out when the API is
// available.
return false;
#endif // TARGET_IPHONE_SIMULATOR
}
#endif // BUILDFLAG(IS_IOS)
} // namespace
// |mmap| uses a nearby address if the hint address is blocked.
@ -166,7 +176,7 @@ uintptr_t SystemAllocPagesInternal(uintptr_t hint,
int access_flag = GetAccessFlags(accessibility);
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_APPLE)
// On macOS 10.14 and higher, executables that are code signed with the
// "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit"
@ -369,7 +379,6 @@ bool TryRecommitSystemPagesInternal(
}
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
#if !BUILDFLAG(IS_NACL)
void* ptr = reinterpret_cast<void*>(address);
#if BUILDFLAG(IS_APPLE)
int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
@ -378,7 +387,7 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
ret = madvise(ptr, length, MADV_DONTNEED);
}
PA_PCHECK(ret == 0);
#else
#else // BUILDFLAG(IS_APPLE)
// We have experimented with other flags, but with suboptimal results.
//
// MADV_FREE (Linux): Makes our memory measurements less predictable;
@ -391,8 +400,7 @@ void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
return;
}
PA_PCHECK(ret == 0);
#endif
#endif // !BUILDFLAG(IS_NACL)
#endif // BUILDFLAG(IS_APPLE)
}
} // namespace partition_alloc::internal

View File

@ -10,8 +10,8 @@
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
namespace partition_alloc::internal {
@ -28,7 +28,7 @@ bool IsOutOfMemory(DWORD error) {
case ERROR_COMMITMENT_MINIMUM:
// Page file is too small.
case ERROR_COMMITMENT_LIMIT:
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// Not enough memory resources are available to process this command.
//
// It is not entirely clear whether this error pertains to out of address

View File

@ -16,6 +16,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -36,7 +37,7 @@
namespace partition_alloc::internal {
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace {
@ -422,6 +423,6 @@ PageCharacteristics page_characteristics;
#endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
} // namespace partition_alloc::internal

View File

@ -26,7 +26,7 @@
#include "build/build_config.h"
// The feature is not applicable to 32-bit address space.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
namespace partition_alloc {
@ -52,7 +52,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_DCHECK(!IsInBRPPool(address));
#endif
pool_handle pool = 0;
pool_handle pool = kNullPoolHandle;
uintptr_t base = 0;
if (IsInRegularPool(address)) {
pool = kRegularPoolHandle;
@ -475,6 +475,6 @@ PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
} // namespace partition_alloc
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

View File

@ -14,7 +14,6 @@
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
@ -105,16 +104,6 @@ void PartitionAllocGlobalUninitForTesting() {
#if BUILDFLAG(ENABLE_PKEYS)
internal::PartitionAddressSpace::UninitPkeyPoolForTesting();
#endif
#if BUILDFLAG(USE_STARSCAN)
internal::PCScan::UninitForTesting(); // IN-TEST
#endif
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if PA_CONFIG(HAS_64_BITS_POINTERS)
internal::PartitionAddressSpace::UninitForTesting();
#else
internal::AddressPoolManager::GetInstance().ResetForTesting();
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
internal::g_oom_handling_function = nullptr;
}

View File

@ -5,6 +5,23 @@
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/partition_alloc.gni")
if (is_apple) {
import("//build/config/features.gni")
}
# Whether 64-bit pointers are used.
# A static_assert in partition_alloc_config.h verifies that.
if (is_nacl) {
# NaCl targets don't use 64-bit pointers.
has_64_bit_pointers = false
} else if (current_cpu == "x64" || current_cpu == "arm64" || current_cpu == "mips64el") {
has_64_bit_pointers = true
} else if (current_cpu == "x86" || current_cpu == "arm" || current_cpu == "mipsel") {
has_64_bit_pointers = false
} else {
assert(false, "Unknown CPU: $current_cpu")
}
if (use_partition_alloc_as_malloc_default) {
_default_allocator = "partition"
} else {
@ -119,6 +136,14 @@ declare_args() {
enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enable the feature flags required to check for dangling pointers. That is to
# say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
#
# This is meant to be used on bots only. It is much easier to override the
# feature flags using a binary flag instead of updating multiple bots'
# scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flags_for_bots = false
# Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointers have been fixed or annotated yet. To avoid
# accounting for the cost of calling the PA's embedder's callbacks when a
@ -136,10 +161,20 @@ declare_args() {
# Shadow metadata is still under development and only supports Linux
# for now.
enable_shadow_metadata = false
if (is_apple) {
# use_blink currently assumes mach absolute ticks (e.g., to ensure trace
# events cohere).
partition_alloc_enable_mach_absolute_time_ticks = is_mac || use_blink
}
}
# *Scan is currently only used by Chromium.
use_starscan = build_with_chromium
# *Scan is currently only used by Chromium, and supports only 64-bit.
use_starscan = build_with_chromium && has_64_bit_pointers
pcscan_stack_supported =
use_starscan && (current_cpu == "x64" || current_cpu == "x86" ||
current_cpu == "arm" || current_cpu == "arm64")
# We want to provide assertions that guard against inconsistent build
# args, but there is no point in having them fire if we're not building
@ -223,6 +258,14 @@ assert(!use_asan_backup_ref_ptr || is_asan,
assert(!use_asan_unowned_ptr || is_asan,
"AsanUnownedPtr requires AddressSanitizer")
if (is_apple) {
assert(!use_blink || partition_alloc_enable_mach_absolute_time_ticks,
"use_blink requires partition_alloc_enable_mach_absolute_time_ticks")
assert(!is_mac || partition_alloc_enable_mach_absolute_time_ticks,
"mac requires partition_alloc_enable_mach_absolute_time_ticks")
}
# AsanBackupRefPtr is not supported outside Chromium. The implementation is
# entangled with `//base`. The code is only physically located with the
# rest of `raw_ptr` to keep it together.

View File

@ -137,7 +137,7 @@
#endif
// MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !BUILDFLAG(IS_NACL)
#if defined(MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h>
// Mark a memory region fully initialized.

View File

@ -41,13 +41,7 @@
#if defined(COMPILER_GCC)
#if BUILDFLAG(IS_NACL)
// Crash report accuracy is not guaranteed on NaCl.
#define PA_TRAP_SEQUENCE1_() __builtin_trap()
#define PA_TRAP_SEQUENCE2_() asm volatile("")
#elif defined(ARCH_CPU_X86_FAMILY)
#if defined(ARCH_CPU_X86_FAMILY)
// TODO(https://crbug.com/958675): In theory, it should be possible to use just
// int3. However, there are a number of crashes with SIGILL as the exception

View File

@ -12,7 +12,7 @@
namespace partition_alloc::internal::base {
#if defined(__GLIBC__) || BUILDFLAG(IS_NACL)
#if defined(__GLIBC__)
#define USE_HISTORICAL_STRERROR_R 1
// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
// is defined, but the symbol is renamed to __gnu_strerror_r which only exists

View File

@ -18,7 +18,7 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include "third_party/lss/linux_syscall_support.h"
#elif BUILDFLAG(IS_MAC)
// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
@ -68,7 +68,7 @@ namespace partition_alloc::internal::base {
// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
// it or some form of it.
void RandBytes(void* output, size_t output_length) {
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// We have to call `getrandom` via Linux Syscall Support, rather than through
// the libc wrapper, because we might not have an up-to-date libc (e.g. on
// some bots).

View File

@ -116,11 +116,6 @@ PlatformThreadId PlatformThread::CurrentId() {
return zx_thread_self();
#elif BUILDFLAG(IS_SOLARIS) || BUILDFLAG(IS_QNX)
return pthread_self();
#elif BUILDFLAG(IS_NACL) && defined(__GLIBC__)
return pthread_self();
#elif BUILDFLAG(IS_NACL) && !defined(__GLIBC__)
// Pointers are 32-bits in NaCl.
return reinterpret_cast<int32_t>(pthread_self());
#elif BUILDFLAG(IS_POSIX) && BUILDFLAG(IS_AIX)
return pthread_self();
#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_AIX)

View File

@ -26,7 +26,7 @@
#include <sys/resource.h>
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif
@ -52,16 +52,14 @@ void* ThreadFunc(void* params) {
delegate = thread_params->delegate;
#if !BUILDFLAG(IS_NACL)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadCreated(GetStackPointer());
#endif
#endif
}
delegate->ThreadMain();
#if !BUILDFLAG(IS_NACL) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadDestroyed();
#endif

View File

@ -14,7 +14,7 @@
#include <windows.h>
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/stack/stack.h"
#endif
@ -62,7 +62,7 @@ DWORD __stdcall ThreadFunc(void* params) {
GetCurrentProcess(), &platform_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadCreated(GetStackPointer());
#endif
@ -74,7 +74,7 @@ DWORD __stdcall ThreadFunc(void* params) {
delete thread_params;
delegate->ThreadMain();
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
PCScan::NotifyThreadDestroyed();
#endif
return 0;

View File

@ -75,6 +75,10 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE)
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_FUCHSIA)
#include <zircon/types.h>
#endif
@ -136,9 +140,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeDelta {
#if BUILDFLAG(IS_FUCHSIA)
static TimeDelta FromZxDuration(zx_duration_t nanos);
#endif
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeDelta FromMachTime(uint64_t mach_time);
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
// Converts an integer value representing TimeDelta to a class. This is used
// when deserializing a |TimeDelta| structure, using a value known to be
@ -879,14 +885,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks
static TimeTicks FromQPCValue(LONGLONG qpc_value);
#endif
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
// Sets the current Mach timebase to `timebase`. Returns the old timebase.
static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
mach_timebase_info_data_t timebase);
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)
// Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
@ -979,7 +987,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadTicks
// Returns true if ThreadTicks::Now() is supported on this system.
[[nodiscard]] static bool IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
return true;
#elif BUILDFLAG(IS_WIN)
return IsSupportedWin();

View File

@ -14,18 +14,21 @@
#include <sys/types.h>
#include <time.h>
#if BUILDFLAG(IS_IOS)
#include <errno.h>
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {
namespace {
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns a pointer to the initialized Mach timebase info struct.
mach_timebase_info_data_t* MachTimebaseInfo() {
static mach_timebase_info_data_t timebase_info = []() {
@ -78,48 +81,32 @@ int64_t MachTimeToMicroseconds(uint64_t mach_time) {
// 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
return checked_cast<int64_t>(microseconds);
}
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns monotonically growing number of ticks in microseconds since some
// unspecified starting point.
int64_t ComputeCurrentTicks() {
#if BUILDFLAG(IS_IOS)
// iOS 10 supports clock_gettime(CLOCK_MONOTONIC, ...), which is
// around 15 times faster than sysctl() call. Use it if possible;
// otherwise, fall back to sysctl().
if (__builtin_available(iOS 10, *)) {
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
struct timespec tp;
if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
}
}
// clock_gettime() returns 0 on success and -1 on failure. Failure can only
// happen because of bad arguments (unsupported clock type or timespec
// pointer out of accessible address space). Here it is known that neither
// can happen since the timespec parameter is stack allocated right above and
// `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is
// supported on.
int res = clock_gettime(CLOCK_MONOTONIC, &tp);
PA_DCHECK(0 == res) << "Failed clock_gettime, errno: " << errno;
// On iOS mach_absolute_time stops while the device is sleeping. Instead use
// now - KERN_BOOTTIME to get a time difference that is not impacted by clock
// changes. KERN_BOOTTIME is updated by the system whenever the system
// clock changes.
struct timeval boottime;
int mib[2] = {CTL_KERN, KERN_BOOTTIME};
size_t size = sizeof(boottime);
int kr = sysctl(mib, std::size(mib), &boottime, &size, nullptr, 0);
PA_DCHECK(KERN_SUCCESS == kr);
TimeDelta time_difference =
subtle::TimeNowIgnoringOverride() -
(Time::FromTimeT(boottime.tv_sec) + Microseconds(boottime.tv_usec));
return time_difference.InMicroseconds();
return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
#else
// mach_absolute_time is it when it comes to ticks on the Mac. Other calls
// with less precision (such as TickCount) just call through to
// mach_absolute_time.
return MachTimeToMicroseconds(mach_absolute_time());
#endif // BUILDFLAG(IS_IOS)
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
}
int64_t ComputeThreadTicks() {
#if BUILDFLAG(IS_IOS)
PA_NOTREACHED();
return 0;
#else
// The pthreads library keeps a cached reference to the thread port, which
// does not have to be released like mach_thread_self() does.
mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
@ -142,7 +129,6 @@ int64_t ComputeThreadTicks() {
absolute_micros += (thread_info_data.user_time.microseconds +
thread_info_data.system_time.microseconds);
return absolute_micros.ValueOrDie();
#endif // BUILDFLAG(IS_IOS)
}
} // namespace
@ -200,12 +186,12 @@ NSDate* Time::ToNSDate() const {
// TimeDelta ------------------------------------------------------------------
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
return Microseconds(MachTimeToMicroseconds(mach_time));
}
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// TimeTicks ------------------------------------------------------------------
@ -225,7 +211,7 @@ bool TimeTicks::IsConsistentAcrossProcesses() {
return true;
}
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
@ -241,15 +227,15 @@ mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
return orig_timebase;
}
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeTicks::Clock TimeTicks::GetClock() {
#if BUILDFLAG(IS_IOS)
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
#else
return Clock::MAC_MACH_ABSOLUTE_TIME;
#endif // BUILDFLAG(IS_IOS)
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
}
// ThreadTicks ----------------------------------------------------------------

View File

@ -23,37 +23,25 @@
// 4. Do not use PA_CONFIG() when defining config macros, or it will lead to
// recursion. Either use #if/#else, or PA_CONFIG_MY_SETTING() directly.
// 5. Try to use constexpr instead of macros wherever possible.
// TODO(bartekn): Convert macros to constexpr as much as possible.
// TODO(bartekn): Convert macros to constexpr or BUILDFLAG as much as possible.
#define PA_CONFIG(flag) (PA_CONFIG_##flag())
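
A minimal sketch of the convention spelled out in rules 4 and 5, using a hypothetical `MY_SETTING`: the raw macro is defined with plain `#if`/`#else` (never via `PA_CONFIG()`, which would recurse), and every consumer reads it through `PA_CONFIG()`:

```cpp
// Definition site: plain preprocessor logic only.
#if defined(ARCH_CPU_64_BITS)
#define PA_CONFIG_MY_SETTING() 1
#else
#define PA_CONFIG_MY_SETTING() 0
#endif

// Usage site: always through PA_CONFIG(flag), which expands to
// (PA_CONFIG_MY_SETTING()).
#if PA_CONFIG(MY_SETTING)
// 64-bit-only code would go here.
#endif
```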
// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
// address space. The only known case where address space is 32-bit is NaCl, so
// eliminate it explicitly. static_assert below ensures that others won't slip
// through.
#define PA_CONFIG_HAS_64_BITS_POINTERS() \
(defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL))
#if PA_CONFIG(HAS_64_BITS_POINTERS)
// Assert that the heuristic in partition_alloc.gni is accurate on supported
// configurations.
#if BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert(sizeof(void*) == 8, "");
#else
static_assert(sizeof(void*) != 8, "");
#endif
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
// PCScan supports 64 bits only and is disabled outside Chromium.
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#define PA_CONFIG_ALLOW_PCSCAN() 1
#else
#define PA_CONFIG_ALLOW_PCSCAN() 0
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && \
#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
(defined(__ARM_NEON) || defined(__ARM_NEON__)) && defined(__ARM_FP)
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 1
#else
#define PA_CONFIG_STARSCAN_NEON_SUPPORTED() 0
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS)
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
// Allow PA to select an alternate pool size at run-time before initialization,
// rather than using a single constexpr value.
//
@ -64,19 +52,19 @@ static_assert(sizeof(void*) != 8, "");
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 1
#else
#define PA_CONFIG_DYNAMICALLY_SELECT_POOL_SIZE() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_IOS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_IOS)
// Puts the regular and BRP pools right next to each other, so that we can
// check "belongs to one of the two pools" with a single bitmask operation.
//
// This setting is specific to 64-bit, as 32-bit has a different implementation.
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS)
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(GLUE_CORE_POOLS)
#define PA_CONFIG_GLUE_CORE_POOLS() 1
#else
#define PA_CONFIG_GLUE_CORE_POOLS() 0
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && \
#if BUILDFLAG(HAS_64_BIT_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#include <linux/version.h>
// TODO(bikineev): Enable for ChromeOS.
@ -84,10 +72,10 @@ static_assert(sizeof(void*) != 8, "");
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0))
#else
#define PA_CONFIG_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) &&
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) &&
// (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(USE_STARSCAN)
// Use card table to avoid races for PCScan configuration without safepoints.
// The card table provides the guarantee that for a marked card the underlying
// super-page is fully initialized.
@ -95,11 +83,7 @@ static_assert(sizeof(void*) != 8, "");
#else
// The card table is permanently disabled for 32-bit.
#define PA_CONFIG_STARSCAN_USE_CARD_TABLE() 0
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(USE_STARSCAN)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE) && !PA_CONFIG(ALLOW_PCSCAN)
#error "Card table can only be used when *Scan is allowed"
#endif
#endif // BUILDFLAG(USE_STARSCAN)
// Use batched freeing when sweeping pages. This builds up a freelist in the
// scanner thread and appends to the slot-span's freelist only once.
@ -184,7 +168,7 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8);
#endif
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#define PA_CONFIG_USE_OOB_POISON() 1
#else
#define PA_CONFIG_USE_OOB_POISON() 0
@ -195,7 +179,7 @@ static_assert(sizeof(void*) == 8);
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
PA_CONFIG(HAS_64_BITS_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else
@ -321,7 +305,7 @@ constexpr bool kUseLazyCommit = false;
// This feature is only enabled in 64-bit environments because pools work
// differently with 32-bit pointers (see glossary).
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
PA_CONFIG(HAS_64_BITS_POINTERS)
BUILDFLAG(HAS_64_BIT_POINTERS)
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 1
#else
#define PA_CONFIG_ENABLE_SHADOW_METADATA() 0
@ -340,7 +324,7 @@ constexpr bool kUseLazyCommit = false;
// Enables compressed (4-byte) pointers that can point within the core pools
// (Regular + BRP).
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 1
#if !PA_CONFIG(GLUE_CORE_POOLS)
@ -354,7 +338,7 @@ constexpr bool kUseLazyCommit = false;
// TODO(1376980): Address MTE once it's enabled.
#error "Compressed pointers don't support tag in the upper bits"
#endif
#else // PA_CONFIG(HAS_64_BITS_POINTERS) &&
#else // BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_POINTER_COMPRESSION)
#define PA_CONFIG_POINTER_COMPRESSION() 0
#endif

View File

@ -13,6 +13,7 @@
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
@ -260,12 +261,29 @@ constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// PartitionAlloc's address space is split into pools. See `glossary.md`.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_PKEYS)
constexpr size_t kNumPools = 4;
#else
constexpr size_t kNumPools = 3;
enum pool_handle : unsigned {
kNullPoolHandle = 0u,
kRegularPoolHandle,
kBRPPoolHandle,
#if BUILDFLAG(HAS_64_BIT_POINTERS)
kConfigurablePoolHandle,
#endif
// New pool_handles will be added here.
#if BUILDFLAG(ENABLE_PKEYS)
// The pkey pool must come last since we pkey_mprotect its entry in the
// metadata tables, e.g. AddressPoolManager::aligned_pools_
kPkeyPoolHandle,
#endif
kMaxPoolHandle
};
// kNullPoolHandle doesn't have metadata, hence - 1
constexpr size_t kNumPools = kMaxPoolHandle - 1;
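
A worked check of the arithmetic above, assuming a 64-bit, pkey-enabled configuration in which every enumerator is present; the handles are dense and only the metadata-less null handle is excluded from the count:

```cpp
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_PKEYS)
static_assert(kRegularPoolHandle == 1u,
              "first real pool directly follows kNullPoolHandle");
static_assert(kPkeyPoolHandle == 4u, "pkey pool must come last");
static_assert(kMaxPoolHandle == 5u && kNumPools == 4u,
              "kNumPools excludes the metadata-less kNullPoolHandle");
#endif
```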
// Maximum pool size. With the exception of the Configurable Pool, it is
// also the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set,
// which allows choosing a different size at initialization time for certain
@ -277,22 +295,18 @@ constexpr size_t kNumPools = 3;
//
// When pointer compression is enabled, we cannot use large pools (at most
// 8GB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || PA_CONFIG(POINTER_COMPRESSION)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif
#else // PA_CONFIG(HAS_64_BITS_POINTERS)
constexpr size_t kNumPools = 2;
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;
static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;
#if BUILDFLAG(ENABLE_PKEYS)
static constexpr pool_handle kPkeyPoolHandle = 4;
static_assert(
kPkeyPoolHandle == kNumPools,
"The pkey pool must come last since we pkey_mprotect its metadata.");
@ -327,7 +341,7 @@ constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool;
}
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
@ -337,7 +351,7 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift;
}
#else // PA_CONFIG(HAS_64_BITS_POINTERS)
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps
@ -351,7 +365,7 @@ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() {

View File

@ -0,0 +1,50 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
#include "base/allocator/partition_allocator/partition_alloc.h"
namespace partition_alloc {
namespace internal {
constexpr bool AllowLeaks = true;
constexpr bool DisallowLeaks = false;
// A subclass of PartitionAllocator for testing. It frees all resources
// (allocated memory, memory held in freelists, and so on) on destruction
// or when reset() is invoked manually.
// If you need to verify that no memory remains allocated, use
// allow_leaks=false: reset() will CHECK-fail if any leak is detected.
// Otherwise (e.g. for intentional leaks), use allow_leaks=true.
template <bool thread_safe, bool allow_leaks>
struct PartitionAllocatorForTesting : public PartitionAllocator<thread_safe> {
PartitionAllocatorForTesting() : PartitionAllocator<thread_safe>() {}
explicit PartitionAllocatorForTesting(PartitionOptions opts)
: PartitionAllocator<thread_safe>() {
PartitionAllocator<thread_safe>::init(opts);
}
~PartitionAllocatorForTesting() { reset(); }
PA_ALWAYS_INLINE void reset() {
PartitionAllocator<thread_safe>::root()->ResetForTesting(allow_leaks);
}
};
} // namespace internal
using PartitionAllocatorForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::DisallowLeaks>;
using PartitionAllocatorAllowLeaksForTesting =
internal::PartitionAllocatorForTesting<internal::ThreadSafe,
internal::AllowLeaks>;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FOR_TESTING_H_
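
A minimal usage sketch, assuming a `PartitionOptions` value supplied by the test; the destructor runs reset(), which CHECK-fails on leaks because the `DisallowLeaks` alias is used:

```cpp
#include "base/allocator/partition_allocator/partition_alloc_for_testing.h"

// Hypothetical test helper: one alloc/free round trip. Dropping the
// Free() call would make ~PartitionAllocatorForTesting() -> reset()
// CHECK-fail with a leak report.
void AllocFreeRoundTrip(partition_alloc::PartitionOptions opts) {
  partition_alloc::PartitionAllocatorForTesting allocator(opts);
  void* object = allocator.root()->Alloc(64, "test-object");
  allocator.root()->Free(object);
}
```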

View File

@ -32,6 +32,8 @@ std::atomic<PartitionAllocHooks::FreeOverrideHook*>
PartitionAllocHooks::free_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
PartitionAllocHooks::realloc_override_hook_(nullptr);
std::atomic<PartitionAllocHooks::QuarantineOverrideHook*>
PartitionAllocHooks::quarantine_override_hook_(nullptr);
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook) {
@ -118,4 +120,9 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
return false;
}
void PartitionAllocHooks::SetQuarantineOverrideHook(
QuarantineOverrideHook* hook) {
quarantine_override_hook_.store(hook, std::memory_order_release);
}
} // namespace partition_alloc

View File

@ -8,6 +8,7 @@
#include <atomic>
#include <cstddef>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
namespace partition_alloc {
@ -34,6 +35,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
// the size of the underlying allocation.
typedef bool ReallocOverrideHook(size_t* out, void* address);
// Special hook type, independent of the rest. Triggered when `free()` detects
// outstanding references to the allocation.
// IMPORTANT: Make sure the hook always overwrites `[address, address + size)`
// with a bit pattern that cannot be interpreted as a valid memory address.
typedef void QuarantineOverrideHook(void* address, size_t size);
// To unhook, call Set*Hooks with nullptrs.
static void SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook);
@ -65,6 +72,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
const char* type_name);
static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
PA_ALWAYS_INLINE static QuarantineOverrideHook* GetQuarantineOverrideHook() {
return quarantine_override_hook_.load(std::memory_order_acquire);
}
static void SetQuarantineOverrideHook(QuarantineOverrideHook* hook);
private:
// Single bool that is used to indicate whether observer or allocation hooks
// are set to reduce the numbers of loads required to check whether hooking is
@ -78,6 +91,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
static std::atomic<FreeOverrideHook*> free_override_hook_;
static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
static std::atomic<QuarantineOverrideHook*> quarantine_override_hook_;
};
} // namespace partition_alloc
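
A sketch of how the new hook composes with the contract stated in the header: the hook must overwrite the whole `[address, address + size)` range with a pattern that cannot be mistaken for a valid address. `PoisonQuarantinedRange` below is illustrative, not part of the API (0xEF matches `kQuarantinedByte`; `<cstring>` is assumed for memset).

// Illustrative hook: fill the quarantined slot with a poison pattern.
void PoisonQuarantinedRange(void* address, size_t size) {
  memset(address, 0xEF, size);  // 0xEFEF...EF is not a dereferenceable address.
}

// Registration; passing nullptr unhooks, as with the other hooks.
partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(
    &PoisonQuarantinedRange);
partition_alloc::PartitionAllocHooks::SetQuarantineOverrideHook(nullptr);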

View File

@ -38,7 +38,7 @@
#include "build/build_config.h"
#if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif
namespace partition_alloc::internal {
@ -74,7 +74,7 @@ template <bool thread_safe>
PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
}
#if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
// that a partial super page is allowed at the end. Since the block list uses
// kSuperPageSize granularity, a partial super page is considered blocked if
@ -93,7 +93,7 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
}
return true;
}
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) &&
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
// Reserves |requested_size| worth of super pages from the specified pool.
@ -123,7 +123,7 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
// In 32-bit mode, when allocating from BRP pool, verify that the requested
// allocation honors the block list. Find a better address otherwise.
#if !PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if !BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (pool == kBRPPoolHandle) {
constexpr int kMaxRandomAddressTries = 10;
for (int i = 0; i < kMaxRandomAddressTries; ++i) {
@ -172,10 +172,10 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
reserved_address = 0;
}
}
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS) &&
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
// Only mark the region as belonging to the pool after it has passed the
// blocklist check in order to avoid a potential race with destructing a
// raw_ptr<T> object that points to non-PA memory in another thread.
@ -284,7 +284,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{
// Reserving memory from the pool is actually not a syscall on 64-bit
// platforms.
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
ScopedSyscallTimer timer{root};
#endif
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
@ -434,7 +434,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{
ScopedSyscallTimer timer{root};
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size);
#endif

View File

@ -9,8 +9,8 @@
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc::internal {
@ -39,13 +39,13 @@ constexpr size_t OrderSubIndexMask(uint8_t order) {
(kNumBucketsPerOrderBits + 1);
}
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#define PA_BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, "");
#else
#define PA_BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, "");
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
OrderIndexShift(0), OrderIndexShift(1), OrderIndexShift(2),

View File

@ -249,9 +249,7 @@ class PartitionFreelistEntry {
(next_address & kSuperPageBaseMask);
#if BUILDFLAG(USE_FREESLOT_BITMAP)
bool marked_as_free_in_bitmap =
for_thread_cache
? true
: !FreeSlotBitmapSlotIsUsed(reinterpret_cast<uintptr_t>(next));
for_thread_cache ? true : !FreeSlotBitmapSlotIsUsed(next_address);
#else
bool marked_as_free_in_bitmap = true;
#endif

View File

@ -17,7 +17,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
@ -323,23 +322,26 @@ void UnmapNow(uintptr_t reservation_start,
// In 32-bit mode, the beginning of a reservation may be excluded from the
// BRP pool, so shift the pointer. Other pools don't have this logic.
PA_DCHECK(IsManagedByPartitionAllocBRPPool(
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
reservation_start
#else
reservation_start +
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
));
} else
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
{
PA_DCHECK(
pool == kRegularPoolHandle
PA_DCHECK(pool == kRegularPoolHandle
#if BUILDFLAG(ENABLE_PKEYS)
|| pool == kPkeyPoolHandle
#endif
|| (IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle));
#if BUILDFLAG(HAS_64_BIT_POINTERS)
||
(IsConfigurablePoolAvailable() && pool == kConfigurablePoolHandle)
#endif
);
// Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
#if BUILDFLAG(ENABLE_PKEYS)
@ -365,7 +367,7 @@ void UnmapNow(uintptr_t reservation_start,
*offset_ptr++ = kOffsetTagNotAllocated;
}
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
reservation_size);
#endif

View File

@ -21,7 +21,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
@ -138,7 +137,7 @@ struct SlotSpanMetadata {
PartitionBucket<thread_safe>* const bucket = nullptr;
// CHECK()ed in AllocNewSlotSpan().
#if PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
// System page size is not a constant on Apple OSes, but is either 4 or 16 kiB
// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
// PartitionPageSize() is 4 times the OS page size.
@ -155,7 +154,7 @@ struct SlotSpanMetadata {
// larger, so it doesn't have as many slots.
static constexpr size_t kMaxSlotsPerSlotSpan =
PartitionPageSize() / kSmallestBucket;
#endif // PA_CONFIG(HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(IS_APPLE)
// The maximum number of bits needed to cover all currently supported OSes.
static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");
@ -482,7 +481,8 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
return reinterpret_cast<AllocationStateMap*>(
SuperPageStateBitmapAddr(super_page));
}
#else
#else // BUILDFLAG(USE_STARSCAN)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() {

View File

@ -736,13 +736,24 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
// this function on PartitionRoots without a thread cache.
PA_CHECK(!flags.with_thread_cache);
auto pool_handle = ChoosePool();
#if BUILDFLAG(ENABLE_PKEYS)
// The pages managed by pkey will be freed by UninitPKeyForTesting().
// Don't invoke FreePages() for the pages.
if (pool_handle == internal::kPkeyPoolHandle) {
return;
}
PA_DCHECK(pool_handle < internal::kNumPools);
#else
PA_DCHECK(pool_handle <= internal::kNumPools);
#endif
auto* curr = first_extent;
while (curr != nullptr) {
auto* next = curr->next;
uintptr_t address = SuperPagesBeginFromExtent(curr);
size_t size =
internal::kSuperPageSize * curr->number_of_consecutive_super_pages;
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
internal::AddressPoolManager::GetInstance().MarkUnused(pool_handle, address,
size);
#endif
@ -759,7 +770,7 @@ void PartitionRoot<thread_safe>::EnableMac11MallocSizeHackForTesting() {
}
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
namespace {
std::atomic<bool> g_reserve_brp_guard_region_called;
// An address constructed by repeating `kQuarantinedByte` should never point
@ -795,7 +806,7 @@ void ReserveBackupRefPtrGuardRegionIfNeeded() {
}
} // namespace
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
// !PA_CONFIG(HAS_64_BITS_POINTERS)
// !BUILDFLAG(HAS_64_BIT_POINTERS)
template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
@ -824,12 +835,12 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// running on the right hardware.
::partition_alloc::internal::InitializeMTESupportIfNeeded();
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// Reserve address space for partition alloc.
internal::PartitionAddressSpace::Init();
#endif
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && !BUILDFLAG(HAS_64_BIT_POINTERS)
ReserveBackupRefPtrGuardRegionIfNeeded();
#endif
@ -1499,6 +1510,73 @@ void PartitionRoot<thread_safe>::DeleteForTesting(
delete partition_root;
}
template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetForTesting(bool allow_leaks) {
if (flags.with_thread_cache) {
ThreadCache::SwapForTesting(nullptr);
flags.with_thread_cache = false;
}
::partition_alloc::internal::ScopedGuard guard(lock_);
#if BUILDFLAG(PA_DCHECK_IS_ON)
if (!allow_leaks) {
unsigned num_allocated_slots = 0;
for (Bucket& bucket : buckets) {
if (bucket.active_slot_spans_head !=
internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
for (internal::SlotSpanMetadata<thread_safe>* slot_span =
bucket.active_slot_spans_head;
slot_span; slot_span = slot_span->next_slot_span) {
num_allocated_slots += slot_span->num_allocated_slots;
}
}
// Full slot spans are not tracked in any list. Use
// bucket.num_full_slot_spans to count the slots they contain.
if (bucket.num_full_slot_spans) {
num_allocated_slots +=
bucket.num_full_slot_spans * bucket.get_slots_per_span();
}
}
PA_DCHECK(num_allocated_slots == 0);
// Check for direct-mapped allocations.
PA_DCHECK(!direct_map_list);
}
#endif
DestructForTesting(); // IN-TEST
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
if (initialized) {
internal::PartitionRootEnumerator::Instance().Unregister(this);
}
#endif // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
for (Bucket& bucket : buckets) {
bucket.active_slot_spans_head =
SlotSpan::get_sentinel_slot_span_non_const();
bucket.empty_slot_spans_head = nullptr;
bucket.decommitted_slot_spans_head = nullptr;
bucket.num_full_slot_spans = 0;
}
next_super_page = 0;
next_partition_page = 0;
next_partition_page_end = 0;
current_extent = nullptr;
first_extent = nullptr;
direct_map_list = nullptr;
for (auto& entity : global_empty_slot_span_ring) {
entity = nullptr;
}
global_empty_slot_span_ring_index = 0;
global_empty_slot_span_ring_size = internal::kDefaultEmptySlotSpanRingSize;
initialized = false;
}
template <bool thread_safe>
void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() {
::partition_alloc::internal::ScopedGuard guard{lock_};

View File

@ -74,7 +74,6 @@
#if BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#endif
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
@ -406,6 +405,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionRoot()
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
// TODO(tasak): remove ~PartitionRoot() after confirming all tests
// don't need ~PartitionRoot().
~PartitionRoot();
// This will unreserve any space in the pool that the PartitionRoot is
@ -585,6 +586,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PartitionStatsDumper* partition_stats_dumper);
static void DeleteForTesting(PartitionRoot* partition_root);
void ResetForTesting(bool allow_leaks);
void ResetBookkeepingForTesting();
PA_ALWAYS_INLINE BucketDistribution GetBucketDistribution() const {
@ -653,10 +655,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
}
internal::pool_handle ChoosePool() const {
#if BUILDFLAG(HAS_64_BIT_POINTERS)
if (flags.use_configurable_pool) {
PA_DCHECK(IsConfigurablePoolAvailable());
return internal::kConfigurablePoolHandle;
}
#endif
#if BUILDFLAG(ENABLE_PKEYS)
if (flags.pkey != internal::kDefaultPkey) {
return internal::kPkeyPoolHandle;
@ -962,13 +966,13 @@ class ScopedSyscallTimer {
PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// Use this variant of GetDirectMapReservationStart as it has better
// performance.
uintptr_t offset = OffsetInBRPPool(address);
uintptr_t reservation_start =
GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
#else
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
uintptr_t reservation_start = GetDirectMapReservationStart(address);
#endif
if (!reservation_start) {
@ -1396,8 +1400,13 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// potential use-after-free issues into unexploitable crashes.
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
brp_zapping_enabled())) {
internal::SecureMemset(object, internal::kQuarantinedByte,
slot_span->GetUsableSize(this));
auto usable_size = slot_span->GetUsableSize(this);
auto hook = PartitionAllocHooks::GetQuarantineOverrideHook();
if (PA_UNLIKELY(hook)) {
hook(object, usable_size);
} else {
internal::SecureMemset(object, internal::kQuarantinedByte, usable_size);
}
}
if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {

View File

@ -11,12 +11,12 @@
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include <cstddef>
#include <cstdint>
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#error "pkey support requires 64 bit pointers"
#endif

View File

@ -85,130 +85,90 @@ namespace base {
// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
// lightweight replacement of a raw pointer, hence performance is critical.
namespace raw_ptr_traits {
// This is a bitfield representing the different flags that can be applied to a
// raw_ptr.
//
// Internal use only: Developers shouldn't use those values directly.
//
// Housekeeping rules: Try not to change trait values, so that numeric trait
// values stay constant across builds (could be useful e.g. when analyzing stack
// traces). A reasonable exception to this rule is the `*ForTest` traits. As a
// matter of convention, new non-test traits should be added before the
// `*ForTest` traits.
enum class RawPtrTraits : unsigned {
kEmpty = 0,
// Disables dangling pointer detection, but keeps other raw_ptr protections.
//
// Don't use directly, use DisableDanglingPtrDetection or DanglingUntriaged
// instead.
struct MayDangle {};
// Disables any protections when MTECheckedPtrImpl is requested, by switching to
// NoOpImpl in that case.
// Don't use directly, use DegradeToNoOpWhenMTE instead.
struct DisableMTECheckedPtr {};
// Disables any hooks, by switching to NoOpImpl in that case.
// Internal use only.
struct DisableHooks {};
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track the
// number of times the raw_ptr is wrapped, unrwapped, etc.
// Test only.
struct UseCountingWrapperForTest {};
// Very internal use only.
using EmptyTrait = void;
kMayDangle = (1 << 0),
template <typename Trait>
inline constexpr bool IsValidTraitV =
std::is_same_v<Trait, MayDangle> ||
std::is_same_v<Trait, DisableMTECheckedPtr> ||
std::is_same_v<Trait, DisableHooks> ||
std::is_same_v<Trait, UseCountingWrapperForTest> ||
std::is_same_v<Trait, EmptyTrait>;
template <typename... Traits>
struct TraitPack {
static_assert((IsValidTraitV<Traits> && ...), "Unknown raw_ptr trait");
template <typename TraitToSearch>
static inline constexpr bool HasV =
(std::is_same_v<TraitToSearch, Traits> || ...);
};
// Replaces an unwanted trait with EmptyTrait.
template <typename TraitToExclude>
struct ExcludeTrait {
template <typename Trait>
using Filter = std::
conditional_t<std::is_same_v<TraitToExclude, Trait>, EmptyTrait, Trait>;
};
// Use TraitBundle alias, instead of TraitBundleInt, so that traits in different
// order and duplicates resolve to the same underlying type. For example,
// TraitBundle<A,B> is the same C++ type as TraitBundle<B,A,B,A>. This also
// allows entirely ignoring a trait under some build configurations, to prevent
// it from turning TraitBundle into a different C++ type.
//
// It'd be easier to just pass bools into TraitBundleInt, instead of echo'ing
// the trait, but that would lead to less readable compiler messages that spit
// out the type. TraitBundleInt<MayDangle,EmptyTrait,DisableHooks,EmptyTrait> is
// more readable than TraitBundleInt<true,false,true,false>.
template <typename... Traits>
struct TraitBundleInt;
template <typename... Traits>
using TraitBundle = TraitBundleInt<
std::conditional_t<TraitPack<Traits...>::template HasV<MayDangle>,
MayDangle,
EmptyTrait>,
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
std::conditional_t<
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>,
DisableMTECheckedPtr,
EmptyTrait>,
// Disables any protections when MTECheckedPtrImpl is requested, by
// switching to NoOpImpl in that case.
//
// Don't use directly, use DegradeToNoOpWhenMTE instead.
kDisableMTECheckedPtr = (1 << 1),
#else
// Entirely ignore DisableMTECheckedPtr on non-MTECheckedPtr builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
kDisableMTECheckedPtr = kEmpty,
#endif
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<TraitPack<Traits...>::template HasV<DisableHooks>,
DisableHooks,
EmptyTrait>,
// Disables any hooks, by switching to NoOpImpl in that case.
//
// Internal use only.
kDisableHooks = (1 << 2),
#else
// Entirely ignore DisableHooks on non-ASanBRP builds, so that
// TraitBundle (and thus raw_ptr/raw_ref) with that trait is considered
// exactly the same type as without it. This matches the long standing
// behavior prior to crrev.com/c/4113514.
EmptyTrait,
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
std::conditional_t<
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>,
UseCountingWrapperForTest,
EmptyTrait>>;
template <typename... Traits>
struct TraitBundleInt {
static constexpr bool kMayDangle =
TraitPack<Traits...>::template HasV<MayDangle>;
static constexpr bool kDisableMTECheckedPtr =
TraitPack<Traits...>::template HasV<DisableMTECheckedPtr>;
static constexpr bool kDisableHooks =
TraitPack<Traits...>::template HasV<DisableHooks>;
static constexpr bool kUseCountingWrapperForTest =
TraitPack<Traits...>::template HasV<UseCountingWrapperForTest>;
// Assert that on certain build configurations, the related traits are not
// even used. If they were, they'd result in a different C++ type, and would
// trigger more costly cross-type raw_ptr/raw_ref conversions.
#if !PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
static_assert(!kDisableMTECheckedPtr);
#endif
#if !BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
static_assert(!kDisableHooks);
kDisableHooks = kEmpty,
#endif
// Use TraitBundle, instead of TraitBundleInt, to re-normalize trait list
// (i.e. order canonically and remove duplicates).
template <typename TraitToAdd>
using AddTraitT = TraitBundle<Traits..., TraitToAdd>;
// Unlike AddTraitT, no need to re-normalize because ExcludeTrait preserves
// the trait list structure.
template <typename TraitToRemove>
using RemoveTraitT = TraitBundleInt<
typename ExcludeTrait<TraitToRemove>::template Filter<Traits>...>;
// Pointer arithmetic is discouraged and disabled by default.
//
// Don't use directly, use AllowPtrArithmetic instead.
kAllowPtrArithmetic = (1 << 3),
// Adds accounting, on top of the chosen implementation, for test purposes.
// raw_ptr/raw_ref with this trait perform extra bookkeeping, e.g. to track
// the number of times the raw_ptr is wrapped, unwrapped, etc.
//
// Test only.
kUseCountingWrapperForTest = (1 << 4),
};
template <typename TraitBundle>
// Used to combine RawPtrTraits:
constexpr RawPtrTraits operator|(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) |
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator&(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) &
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator~(RawPtrTraits a) {
return static_cast<RawPtrTraits>(~static_cast<unsigned>(a));
}
namespace raw_ptr_traits {
constexpr bool Contains(RawPtrTraits a, RawPtrTraits b) {
return (a & b) != RawPtrTraits::kEmpty;
}
constexpr RawPtrTraits Remove(RawPtrTraits a, RawPtrTraits b) {
return a & ~b;
}
constexpr bool AreValid(RawPtrTraits traits) {
return Remove(traits, RawPtrTraits::kMayDangle |
RawPtrTraits::kDisableMTECheckedPtr |
RawPtrTraits::kDisableHooks |
RawPtrTraits::kAllowPtrArithmetic |
RawPtrTraits::kUseCountingWrapperForTest) ==
RawPtrTraits::kEmpty;
}
template <RawPtrTraits Traits>
struct TraitsToImpl;
} // namespace raw_ptr_traits
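
A few compile-time checks illustrating how the flag-style traits compose; this is a sketch that merely restates the semantics of the operators and helpers above.

// Traits now combine with |, and the helpers inspect the resulting mask.
constexpr auto kCombined =
    base::RawPtrTraits::kMayDangle | base::RawPtrTraits::kDisableHooks;
static_assert(base::raw_ptr_traits::Contains(kCombined,
                                             base::RawPtrTraits::kMayDangle));
static_assert(base::raw_ptr_traits::Remove(
                  kCombined, base::RawPtrTraits::kDisableHooks) ==
              base::RawPtrTraits::kMayDangle);
static_assert(base::raw_ptr_traits::AreValid(kCombined));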
@ -346,17 +306,33 @@ struct MTECheckedPtrImpl {
// Wraps a pointer, and returns its uintptr_t representation.
template <typename T>
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
// Catch the obviously unsupported cases, e.g. `nullptr` or `-1ull`.
//
// `ExtractPtr(ptr)` should be functionally identical to `ptr` for
// the purposes of `EnabledForPtr()`, since we assert that `ptr` is
// an untagged raw pointer (there are no tag bits provided by
// MTECheckedPtr to strip off). However, something like `-1ull`
// looks identical to a fully tagged-up pointer. We'll add a check
// here just to make sure there's no difference in the support check
// whether extracted or not.
const bool extracted_supported =
PartitionAllocSupport::EnabledForPtr(ExtractPtr(ptr));
const bool raw_supported = PartitionAllocSupport::EnabledForPtr(ptr);
PA_BASE_DCHECK(extracted_supported == raw_supported);
// At the expense of consistency, we use the `raw_supported`
// condition. When wrapping a raw pointer, we assert that having set
// bits conflatable with the MTECheckedPtr tag disqualifies `ptr`
// from support.
if (!raw_supported) {
return ptr;
}
// Disambiguation: UntagPtr removes the hardware MTE tag, whereas this
// function is responsible for adding the software MTE tag.
uintptr_t addr = partition_alloc::UntagPtr(ptr);
PA_BASE_DCHECK(ExtractTag(addr) == 0ull);
// Return a not-wrapped |addr|, if it's either nullptr or if the protection
// for this pointer is disabled.
if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
return ptr;
}
// Read the tag and place it in the top bits of the address.
// Even if PartitionAlloc's tag has less than kTagBits, we'll read
// what's given and pad the rest with 0s.
@ -407,19 +383,30 @@ struct MTECheckedPtrImpl {
return wrapped_ptr;
}
// Unwraps the pointer's uintptr_t representation, while asserting that memory
// hasn't been freed. The function must handle nullptr gracefully.
// Unwraps the pointer as a T*, without making an assertion on whether
// memory was freed or not.
template <typename T>
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
// SafelyUnwrapPtrForDereference handles nullptr case well.
return SafelyUnwrapPtrForDereference(wrapped_ptr);
// Return `wrapped_ptr` straightaway if protection is disabled, e.g.
// when `ptr` is `nullptr` or `uintptr_t{-1ull}`.
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
}
// Unwraps the pointer's uintptr_t representation, without making an assertion
// on whether memory was freed or not.
template <typename T>
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
return ExtractPtr(wrapped_ptr);
// Return `wrapped_ptr` straightaway if protection is disabled, e.g.
// when `ptr` is `nullptr` or `uintptr_t{-1ull}`.
T* extracted_ptr = ExtractPtr(wrapped_ptr);
if (!PartitionAllocSupport::EnabledForPtr(extracted_ptr)) {
return wrapped_ptr;
}
return extracted_ptr;
}
// Upcasts the wrapped pointer.
@ -522,10 +509,12 @@ struct MTECheckedPtrImpl {
// wrapped, unwrapped, etc.
//
// Test only.
template <typename Traits>
template <RawPtrTraits Traits>
struct RawPtrCountingImplWrapperForTest
: public raw_ptr_traits::TraitsToImpl<Traits>::Impl {
static_assert(!Traits::kUseCountingWrapperForTest);
static_assert(
!raw_ptr_traits::Contains(Traits,
RawPtrTraits::kUseCountingWrapperForTest));
using SuperImpl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -685,29 +674,34 @@ struct IsSupportedType<T,
#undef PA_WINDOWS_HANDLE_TYPE
#endif
template <typename Traits>
template <RawPtrTraits Traits>
struct TraitsToImpl {
static_assert(AreValid(Traits), "Unknown raw_ptr trait(s)");
private:
// UnderlyingImpl is the struct that provides the implementation of the
// protections related to raw_ptr.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
using UnderlyingImpl = internal::RawPtrBackupRefImpl<
/*AllowDangling=*/Traits::kMayDangle>;
/*allow_dangling=*/Contains(Traits, RawPtrTraits::kMayDangle)>;
#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
using UnderlyingImpl =
std::conditional_t<Traits::kMayDangle,
using UnderlyingImpl = std::conditional_t<
Contains(Traits, RawPtrTraits::kMayDangle),
// No special bookkeeping required for this case,
// just treat these as ordinary pointers.
internal::RawPtrNoOpImpl,
internal::RawPtrAsanUnownedImpl>;
internal::RawPtrAsanUnownedImpl<
Contains(Traits, RawPtrTraits::kAllowPtrArithmetic)>>;
#elif PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
using UnderlyingImpl =
std::conditional_t<Traits::kDisableMTECheckedPtr,
std::conditional_t<Contains(Traits, RawPtrTraits::kDisableMTECheckedPtr),
internal::RawPtrNoOpImpl,
internal::MTECheckedPtrImpl<
internal::MTECheckedPtrImplPartitionAllocSupport>>;
#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
using UnderlyingImpl = std::conditional_t<Traits::kDisableHooks,
using UnderlyingImpl =
std::conditional_t<Contains(Traits, RawPtrTraits::kDisableHooks),
internal::RawPtrNoOpImpl,
internal::RawPtrHookableImpl>;
#else
@ -720,9 +714,9 @@ struct TraitsToImpl {
// Impl may be different from UnderlyingImpl, because it may include a
// wrapper.
using Impl = std::conditional_t<
Traits::kUseCountingWrapperForTest,
Contains(Traits, RawPtrTraits::kUseCountingWrapperForTest),
internal::RawPtrCountingImplWrapperForTest<
typename Traits::template RemoveTraitT<UseCountingWrapperForTest>>,
Remove(Traits, RawPtrTraits::kUseCountingWrapperForTest)>,
UnderlyingImpl>;
};
@ -754,13 +748,11 @@ struct TraitsToImpl {
// non-default move constructor/assignment. Thus, it's possible to get an error
// where the pointer is not actually dangling, and have to work around the
// compiler. We have not managed to construct such an example in Chromium yet.
template <typename T, typename Traits = raw_ptr_traits::TraitBundle<>>
template <typename T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// Type to return from ExtractAsDangling(), which is identical except
// MayDangle trait is added (if one isn't there already).
using DanglingRawPtrType =
raw_ptr<T,
typename Traits::template AddTraitT<raw_ptr_traits::MayDangle>>;
// kMayDangle trait is added (if one isn't there already).
using DanglingRawPtrType = raw_ptr<T, Traits | RawPtrTraits::kMayDangle>;
public:
using Impl = typename raw_ptr_traits::TraitsToImpl<Traits>::Impl;
@ -842,17 +834,15 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR)
template <
typename PassedTraits,
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
template <RawPtrTraits PassedTraits,
typename Unused = std::enable_if_t<Traits != PassedTraits>>
PA_ALWAYS_INLINE explicit raw_ptr(const raw_ptr<T, PassedTraits>& p) noexcept
: wrapped_ptr_(Impl::WrapRawPtrForDuplication(
raw_ptr_traits::TraitsToImpl<PassedTraits>::Impl::
UnsafelyUnwrapPtrForDuplication(p.wrapped_ptr_))) {}
template <
typename PassedTraits,
typename Unused = std::enable_if_t<!std::is_same_v<Traits, PassedTraits>>>
template <RawPtrTraits PassedTraits,
typename Unused = std::enable_if_t<Traits != PassedTraits>>
PA_ALWAYS_INLINE raw_ptr& operator=(
const raw_ptr<T, PassedTraits>& p) noexcept {
Impl::ReleaseWrappedPtr(wrapped_ptr_);
@ -1004,20 +994,23 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
return *this += -delta_elems;
}
template <
typename Z,
typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>>
// Do not disable operator+() and operator-().
// They provide OOB checks; keep them enabled. The arithmetic may still be
// blocked later, when the underlying += or -= operation is applied, if those
// are disabled. In the absence of operators +/-, the compiler is free to
// implicitly convert to the underlying T* representation and perform ordinary
// pointer arithmetic, thus defeating the purpose of disabling them.
template <typename Z>
friend PA_ALWAYS_INLINE raw_ptr operator+(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p;
return result += delta_elems;
}
template <
typename Z,
typename = std::enable_if_t<partition_alloc::internal::offset_type<Z>>>
template <typename Z>
friend PA_ALWAYS_INLINE raw_ptr operator-(const raw_ptr& p, Z delta_elems) {
raw_ptr result = p;
return result -= delta_elems;
}
friend PA_ALWAYS_INLINE ptrdiff_t operator-(const raw_ptr& p1,
const raw_ptr& p2) {
return Impl::GetDeltaElems(p1.wrapped_ptr_, p2.wrapped_ptr_);
@ -1086,22 +1079,22 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// `raw_ptr` and `raw_ptr<U>` in the friend declaration itself does not work,
// because a comparison operator defined inline would not be allowed to call
// `raw_ptr<U>`'s private `GetForComparison()` method.
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
template <typename U, typename V, typename R1, typename R2>
template <typename U, typename V, RawPtrTraits R1, RawPtrTraits R2>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, R1>& lhs,
const raw_ptr<V, R2>& rhs);
@ -1211,41 +1204,41 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
// #union, #global-scope, #constexpr-ctor-field-initializer
RAW_PTR_EXCLUSION T* wrapped_ptr_;
template <typename U, typename R>
template <typename U, base::RawPtrTraits R>
friend class raw_ptr;
};
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() == rhs.GetForComparison();
}
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return !(lhs == rhs);
}
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() < rhs.GetForComparison();
}
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() > rhs.GetForComparison();
}
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() <= rhs.GetForComparison();
}
template <typename U, typename V, typename Traits1, typename Traits2>
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
const raw_ptr<V, Traits2>& rhs) {
return lhs.GetForComparison() >= rhs.GetForComparison();
@ -1254,7 +1247,7 @@ PA_ALWAYS_INLINE bool operator>=(const raw_ptr<U, Traits1>& lhs,
template <typename T>
struct IsRawPtr : std::false_type {};
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
struct IsRawPtr<raw_ptr<T, Traits>> : std::true_type {};
template <typename T>
@ -1263,9 +1256,9 @@ inline constexpr bool IsRawPtrV = IsRawPtr<T>::value;
template <typename T>
inline constexpr bool IsRawPtrMayDangleV = false;
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
Traits::kMayDangle;
raw_ptr_traits::Contains(Traits, RawPtrTraits::kMayDangle);
// Template helpers for working with T* or raw_ptr<T>.
template <typename T>
@ -1274,7 +1267,7 @@ struct IsPointer : std::false_type {};
template <typename T>
struct IsPointer<T*> : std::true_type {};
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
struct IsPointer<raw_ptr<T, Traits>> : std::true_type {};
template <typename T>
@ -1290,7 +1283,7 @@ struct RemovePointer<T*> {
using type = T;
};
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
struct RemovePointer<raw_ptr<T, Traits>> {
using type = T;
};
@ -1311,23 +1304,19 @@ using base::raw_ptr;
//
// When using it, please provide a justification about what guarantees that it
// will never be dereferenced after becoming dangling.
using DisableDanglingPtrDetection =
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
constexpr auto DisableDanglingPtrDetection = base::RawPtrTraits::kMayDangle;
// See `docs/dangling_ptr.md`
// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
// occurrences are meant to be removed. See https://crbug.com/1291138.
using DanglingUntriaged =
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>;
constexpr auto DanglingUntriaged = base::RawPtrTraits::kMayDangle;
// This type is to be used in callbacks arguments when it is known that they
// might receive dangling pointers. In any other cases, please use one of:
// - raw_ptr<T, DanglingUntriaged>
// - raw_ptr<T, DisableDanglingPtrDetection>
template <typename T>
using MayBeDangling = base::raw_ptr<
T,
base::raw_ptr_traits::TraitBundle<base::raw_ptr_traits::MayDangle>>;
template <typename T, base::RawPtrTraits Traits = base::RawPtrTraits::kEmpty>
using MayBeDangling = base::raw_ptr<T, Traits | base::RawPtrTraits::kMayDangle>;
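
For instance, a callback parameter that may legitimately receive an already-freed pointer could be declared as follows; `Widget` and both function names are hypothetical (`<cstdint>` assumed for uint8_t).

class Widget;  // hypothetical pointee type

// The pointee may already be destroyed by the time this runs.
void OnWidgetEvicted(MayBeDangling<Widget> widget);

// Extra traits still compose via the defaulted Traits parameter:
void OnBufferEvicted(
    MayBeDangling<uint8_t, base::RawPtrTraits::kAllowPtrArithmetic> data);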
// The following template parameters are only meaningful when `raw_ptr`
// is `MTECheckedPtr` (never the case unless a particular GN arg is set
@ -1342,25 +1331,18 @@ using MayBeDangling = base::raw_ptr<
// See `base/memory/raw_ptr_mtecheckedptr.md`
// Direct pass-through to no-op implementation.
using DegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::DisableMTECheckedPtr>;
constexpr auto DegradeToNoOpWhenMTE = base::RawPtrTraits::kDisableMTECheckedPtr;
// As above, but with the "untriaged dangling" annotation.
using DanglingUntriagedDegradeToNoOpWhenMTE = base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::MayDangle,
base::raw_ptr_traits::DisableMTECheckedPtr>;
// As above, but with the "explicitly disable protection" annotation.
using DisableDanglingPtrDetectionDegradeToNoOpWhenMTE =
base::raw_ptr_traits::TraitBundle<
base::raw_ptr_traits::MayDangle,
base::raw_ptr_traits::DisableMTECheckedPtr>;
// The use of pointer arithmetic with raw_ptr is strongly discouraged and
// disabled by default. Usually a container like span<> should be used
// instead of the raw_ptr.
constexpr auto AllowPtrArithmetic = base::RawPtrTraits::kAllowPtrArithmetic;
namespace std {
// Override so set/map lookups do not create extra raw_ptr. This also allows
// dangling pointers to be used for lookup.
template <typename T, typename Traits>
template <typename T, base::RawPtrTraits Traits>
struct less<raw_ptr<T, Traits>> {
using Impl = typename raw_ptr<T, Traits>::Impl;
using is_transparent = void;
@ -1385,7 +1367,7 @@ struct less<raw_ptr<T, Traits>> {
// Define for cases where raw_ptr<T> holds a pointer to an array of type T.
// This is consistent with definition of std::iterator_traits<T*>.
// Algorithms like std::binary_search need that.
template <typename T, typename Traits>
template <typename T, base::RawPtrTraits Traits>
struct iterator_traits<raw_ptr<T, Traits>> {
using difference_type = ptrdiff_t;
using value_type = std::remove_cv_t<T>;
@ -1394,6 +1376,33 @@ struct iterator_traits<raw_ptr<T, Traits>> {
using iterator_category = std::random_access_iterator_tag;
};
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. The latter is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing the pointer_traits is the standard blessed way to customize
// `std::to_address(pointer)` in C++20 [1].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ptr<T, Traits>> {
using pointer = ::raw_ptr<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ptr<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(&r);
}
static constexpr element_type* to_address(pointer p) noexcept {
return p.get();
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_H_
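
With the specialization above in place (libc++ only, per the `_LIBCPP_VERSION` guard, and assuming C++20), the standard customization point unwraps raw_ptr; a sketch:

#include <memory>  // std::to_address

int value = 42;
raw_ptr<int> wrapped = &value;
// Resolved via pointer_traits<raw_ptr<int>>::to_address(), i.e. get().
int* unwrapped = std::to_address(wrapped);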

View File

@ -12,13 +12,34 @@
namespace base::internal {
PA_NO_SANITIZE("address")
bool RawPtrAsanUnownedImpl::EndOfAliveAllocation(const volatile void* ptr) {
bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr) {
uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return __asan_region_is_poisoned(reinterpret_cast<void*>(address), 1) &&
// Normally, we probe the first byte of an object, but in cases of pointer
// arithmetic, we may be probing subsequent bytes, including the legal
// "end + 1" position.
//
// Alas, ASAN will claim an unmapped page is unpoisoned, so willfully ignore
// the first address of a page, since "end + 1" of an object allocated exactly
// up to a page boundary will SEGV on probe. This will cause false negatives
// for pointers that happen to be page aligned, which is undesirable but
// necessary for now.
//
// We minimize the consequences by using the pointer arithmetic flag in
// higher levels to conditionalize this suppression.
//
// TODO(tsepez): this may still fail for a non-accessible but non-null
// return from, say, malloc(0) which happens to be page-aligned.
//
// TODO(tsepez): enforce the pointer arithmetic flag. Until then, we
// may fail here if a pointer requires the flag but is lacking it.
return is_adjustable_ptr &&
((address & 0x0fff) == 0 ||
__asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) &&
!__asan_region_is_poisoned(reinterpret_cast<void*>(address - 1), 1);
}
bool RawPtrAsanUnownedImpl::LikelySmuggledScalar(const volatile void* ptr) {
bool LikelySmuggledScalar(const volatile void* ptr) {
intptr_t address = reinterpret_cast<intptr_t>(ptr);
return address < 0x4000; // Negative or small positive.
}

View File

@ -19,6 +19,10 @@
namespace base::internal {
bool EndOfAliveAllocation(const volatile void* ptr, bool is_adjustable_ptr);
bool LikelySmuggledScalar(const volatile void* ptr);
template <bool IsAdjustablePtr>
struct RawPtrAsanUnownedImpl {
// Wraps a pointer.
template <typename T>
@ -91,14 +95,11 @@ struct RawPtrAsanUnownedImpl {
template <typename T>
static void ProbeForLowSeverityLifetimeIssue(T* wrapped_ptr) {
if (wrapped_ptr && !LikelySmuggledScalar(wrapped_ptr) &&
!EndOfAliveAllocation(wrapped_ptr)) {
!EndOfAliveAllocation(wrapped_ptr, IsAdjustablePtr)) {
reinterpret_cast<const volatile uint8_t*>(wrapped_ptr)[0];
}
}
static bool EndOfAliveAllocation(const volatile void* ptr);
static bool LikelySmuggledScalar(const volatile void* ptr);
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T>

View File

@ -13,6 +13,7 @@
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -133,7 +134,7 @@ struct RawPtrBackupRefImpl {
#endif
AcquireInternal(address);
} else {
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_HAS_BUILTIN(__builtin_constant_p)
// Similarly to `IsSupportedAndNotNull` above, elide the
// `BanSuperPageFromBRPPool` call if the compiler can prove that `address`
@ -148,7 +149,7 @@ struct RawPtrBackupRefImpl {
partition_alloc::internal::AddressPoolManagerBitmap::
BanSuperPageFromBRPPool(address);
}
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
}
return ptr;

View File

@ -0,0 +1,76 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
// Struct intended to be used with designated initializers and passed
// to the `CountersMatch()` matcher.
//
// `CountingImplType` isn't used directly; it tells the `CountersMatch`
// matcher which impl's static members should be checked.
template <typename CountingImplType>
struct CountingRawPtrExpectations {
absl::optional<int> wrap_raw_ptr_cnt;
absl::optional<int> release_wrapped_ptr_cnt;
absl::optional<int> get_for_dereference_cnt;
absl::optional<int> get_for_extraction_cnt;
absl::optional<int> get_for_comparison_cnt;
absl::optional<int> wrapped_ptr_swap_cnt;
absl::optional<int> wrapped_ptr_less_cnt;
absl::optional<int> pointer_to_member_operator_cnt;
absl::optional<int> wrap_raw_ptr_for_dup_cnt;
absl::optional<int> get_for_duplication_cnt;
};
#define REPORT_UNEQUAL_RAW_PTR_COUNTER(member_name, CounterClassImpl) \
{ \
if (arg.member_name.has_value() && \
arg.member_name.value() != CounterClassImpl::member_name) { \
*result_listener << "Expected `" #member_name "` to be " \
<< arg.member_name.value() << " but got " \
<< CounterClassImpl::member_name << "; "; \
result = false; \
} \
}
#define REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CounterClassImpl) \
{ \
result = true; \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(release_wrapped_ptr_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_dereference_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_extraction_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_comparison_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_swap_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrapped_ptr_less_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(pointer_to_member_operator_cnt, \
CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(wrap_raw_ptr_for_dup_cnt, CounterClassImpl) \
REPORT_UNEQUAL_RAW_PTR_COUNTER(get_for_duplication_cnt, CounterClassImpl) \
}
// Matcher used with `CountingRawPtr`. Provides slightly shorter
// boilerplate for verifying counts. This inner function is detached
// from the `MATCHER` to isolate the templating.
template <typename CountingImplType>
bool CountersMatchImpl(const CountingRawPtrExpectations<CountingImplType>& arg,
testing::MatchResultListener* result_listener) {
bool result = true;
REPORT_UNEQUAL_RAW_PTR_COUNTERS(result, CountingImplType);
return result;
}
// Implicit `arg` has type `CountingRawPtrExpectations`, specialized for
// the specific counting impl.
MATCHER(CountersMatch, "counting impl has specified counters") {
return CountersMatchImpl(arg, result_listener);
}
#undef REPORT_UNEQUAL_RAW_PTR_COUNTERS
#undef REPORT_UNEQUAL_RAW_PTR_COUNTER
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_PTR_TEST_SUPPORT_H_
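
Typical use in a test body, paired with a counting raw_ptr (a sketch; `CountingImpl` stands for whichever counting impl the test instantiated):

// After exercising the pointer, assert on the recorded counts. Fields left
// unset (absl::nullopt) are not checked.
EXPECT_THAT((CountingRawPtrExpectations<CountingImpl>{
                .get_for_dereference_cnt = 1,
                .get_for_extraction_cnt = 0,
            }),
            CountersMatch());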

View File

@ -17,7 +17,7 @@
namespace base {
template <class T, typename Traits>
template <class T, RawPtrTraits Traits>
class raw_ref;
namespace internal {
@ -25,7 +25,7 @@ namespace internal {
template <class T>
struct is_raw_ref : std::false_type {};
template <class T, typename Traits>
template <class T, RawPtrTraits Traits>
struct is_raw_ref<::base::raw_ref<T, Traits>> : std::true_type {};
template <class T>
@ -53,7 +53,7 @@ constexpr inline bool is_raw_ref_v = is_raw_ref<T>::value;
// Unlike a native `T&` reference, a mutable `raw_ref<T>` can be changed
// independent of the underlying `T`, similar to `std::reference_wrapper`. That
// means the reference inside it can be moved and reassigned.
template <class T, typename Traits = raw_ptr_traits::TraitBundle<>>
template <class T, RawPtrTraits Traits = RawPtrTraits::kEmpty>
class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// operator* is used with the expectation of GetForExtraction semantics:
//
@ -63,9 +63,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// The implementation of operator* provides GetForDereference semantics, and
// this results in spurious crashes in BRP-ASan builds, so we need to disable
// hooks that provide BRP-ASan instrumentation for raw_ref.
using Inner = raw_ptr<
T,
typename Traits::template AddTraitT<raw_ptr_traits::DisableHooks>>;
using Inner = raw_ptr<T, Traits | RawPtrTraits::kDisableHooks>;
public:
using Impl = typename Inner::Impl;
@ -81,7 +79,8 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
internal::MTECheckedPtrImplPartitionAllocSupport>> ||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
@ -98,24 +97,24 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
raw_ref& operator=(const T&& p) = delete;
PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
}
PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) {
p.inner_ = nullptr;
}
}
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_);
return *this;
}
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) {
p.inner_ = nullptr;
@ -128,14 +127,14 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
: inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
}
// Deliberately implicit in order to support implicit upcast.
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
: inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) {
p.inner_ = nullptr;
}
@ -149,13 +148,13 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// Upcast assignment
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_);
return *this;
}
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) {
p.inner_ = nullptr;
@ -164,7 +163,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
}
PA_ALWAYS_INLINE T& operator*() const {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator*();
}
@ -173,12 +172,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// used in place of operator*() when the memory referred to by the reference
// is not immediately going to be accessed.
PA_ALWAYS_INLINE T& get() const {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return *inner_.get();
}
PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
PA_RAW_PTR_CHECK(inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator->();
}
@ -191,123 +190,142 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
}
friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
swap(lhs.inner_, rhs.inner_);
}
template <class U>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ == rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ != rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <class U>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs,
const raw_ref<U, Traits>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs);
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ == &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ != &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ < &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ > &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ <= &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ >= &rhs;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs == rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs != rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs < rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs > rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs <= rhs.inner_;
}
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_.get()); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs >= rhs.inner_;
}
private:
template <class U, typename R>
template <class U, RawPtrTraits R>
friend class raw_ref;
Inner inner_;
};
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ == rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ != rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ < rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ > rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ <= rhs.inner_;
}
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return lhs.inner_ >= rhs.inner_;
}
// CTAD deduction guide.
template <class T>
raw_ref(T&) -> raw_ref<T>;
@ -318,7 +336,7 @@ raw_ref(const T&) -> raw_ref<const T>;
template <typename T>
struct IsRawRef : std::false_type {};
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
struct IsRawRef<raw_ref<T, Traits>> : std::true_type {};
template <typename T>
@ -329,7 +347,7 @@ struct RemoveRawRef {
using type = T;
};
template <typename T, typename Traits>
template <typename T, RawPtrTraits Traits>
struct RemoveRawRef<raw_ref<T, Traits>> {
using type = T;
};
@ -345,7 +363,7 @@ namespace std {
// Override so set/map lookups do not create extra raw_ref. This also
// allows C++ references to be used for lookup.
template <typename T, typename Traits>
template <typename T, base::RawPtrTraits Traits>
struct less<raw_ref<T, Traits>> {
using Impl = typename raw_ref<T, Traits>::Impl;
using is_transparent = void;
@ -367,6 +385,37 @@ struct less<raw_ref<T, Traits>> {
}
};
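Since the specialization defines `is_transparent`, associative containers keyed on `raw_ref` accept heterogeneous lookups with a plain `T&`. A minimal usage sketch (illustrative only; assumes the raw_ref.h header above):

```cpp
#include <set>
// #include "base/allocator/partition_allocator/pointers/raw_ref.h"

void TransparentLookupExample() {
  int a = 1;
  int b = 2;
  std::set<raw_ref<int>> refs;
  refs.insert(raw_ref<int>(a));
  refs.insert(raw_ref<int>(b));
  // The transparent std::less above lets us probe with a C++ reference
  // directly, without constructing a temporary raw_ref for the lookup.
  bool found = refs.find(a) != refs.end();
  (void)found;
}
```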
#if defined(_LIBCPP_VERSION)
// Specialize std::pointer_traits. This is required to obtain the
// underlying raw pointer in the std::to_address(pointer) overload.
// Implementing pointer_traits is the standard-blessed way to customize
// `std::to_address(pointer)` in C++20 [1].
//
// [1] https://wg21.link/pointer.traits.optmem
template <typename T, ::base::RawPtrTraits Traits>
struct pointer_traits<::raw_ref<T, Traits>> {
using pointer = ::raw_ref<T, Traits>;
using element_type = T;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = ::raw_ref<U, Traits>;
static constexpr pointer pointer_to(element_type& r) noexcept {
return pointer(r);
}
static constexpr element_type* to_address(pointer p) noexcept {
// `raw_ref::get` is used instead of `raw_ref::operator*`. It provides
// GetForExtraction rather than GetForDereference semantics (see
// raw_ptr.h). This should be used when we don't know whether the memory
// will be accessed.
return &(p.get());
}
};
#endif // defined(_LIBCPP_VERSION)
} // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_POINTERS_RAW_REF_H_
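With the `pointer_traits` specialization in place, `std::to_address()` can unwrap a `raw_ref` using extraction (not dereference) semantics. A minimal sketch (libc++/C++20 only, per the `_LIBCPP_VERSION` guard; illustrative, not part of the header):

```cpp
#include <memory>
// #include "base/allocator/partition_allocator/pointers/raw_ref.h"

void ToAddressExample() {
  int value = 42;
  raw_ref<int> ref(value);
  // Dispatches to pointer_traits<raw_ref<int>>::to_address(), which calls
  // raw_ref::get() (GetForExtraction semantics) rather than operator*.
  int* p = std::to_address(ref);
  (void)p;
}
```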

View File

@ -4,14 +4,16 @@
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
namespace partition_alloc::internal {
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
ReservationOffsetTable::_PaddedReservationOffsetTables
ReservationOffsetTable::padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else
ReservationOffsetTable::_ReservationOffsetTable
ReservationOffsetTable::reservation_offset_table_;
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
} // namespace partition_alloc::internal

View File

@ -17,7 +17,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/tagging.h"
@ -67,7 +66,7 @@ static constexpr uint16_t kOffsetTagNormalBuckets =
// granularity is kSuperPageSize.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
public:
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// There is one reservation offset table per Pool in 64-bit mode.
static constexpr size_t kReservationOffsetTableCoverage = kPoolMaxSize;
static constexpr size_t kReservationOffsetTableLength =
@ -78,7 +77,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull;
static constexpr size_t kReservationOffsetTableLength =
4 * kGiB / kSuperPageSize;
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
static_assert(kReservationOffsetTableLength < kOffsetTagNormalBuckets,
"Offsets should be smaller than kOffsetTagNormalBuckets.");
@ -95,7 +94,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
offset = kOffsetTagNotAllocated;
}
};
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If pkey support is enabled, we need to pkey-tag the tables of the pkey
// pool. For this, we need to pad the tables so that the pkey ones start on a
// page boundary.
@ -109,12 +108,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
#else
// A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
};
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) {
PA_DCHECK(0 < handle && handle <= kNumPools);
PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
return ReservationOffsetTable::padded_reservation_offset_tables_
.tables[handle - 1]
.offsets;
@ -144,7 +143,7 @@ PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(pool_handle pool,
ReservationOffsetTable::kReservationOffsetTableLength);
return GetReservationOffsetTable(pool) + table_index;
}
#else
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(uintptr_t address) {
return ReservationOffsetTable::reservation_offset_table_.offsets;
}
@ -154,10 +153,10 @@ PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
return ReservationOffsetTable::reservation_offset_table_.offsets +
ReservationOffsetTable::kReservationOffsetTableLength;
}
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, find the owning Pool and compute the offset from its base.
auto [pool, offset] = GetPoolAndOffset(address);
return ReservationOffsetPointer(pool, offset);
@ -200,13 +199,13 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded
// inside another macro (PA_DCHECK).
#if !PA_CONFIG(HAS_64_BITS_POINTERS)
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr size_t kBRPOffset =
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
#else
constexpr size_t kBRPOffset = 0ull;
#endif // !PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // !BUILDFLAG(HAS_64_BIT_POINTERS)
// Make sure the reservation start is in the same pool as |address|.
// In the 32-bit mode, the beginning of a reservation may be excluded
// from the BRP pool, so shift the pointer. The other pools don't have
@ -227,7 +226,7 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
return reservation_start;
}
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the given address doesn't point to direct-map allocated memory,
// returns 0.
// This variant has better performance than the regular one on 64-bit builds if
@ -247,7 +246,7 @@ GetDirectMapReservationStart(uintptr_t address,
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
return reservation_start;
}
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// Returns true if |address| is the beginning of the first super page of a
// reservation, i.e. either a normal bucket super page, or the first super page

View File

@ -10,12 +10,11 @@
#include "base/allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/types/strong_alias.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(USE_STARSCAN)
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif
@ -208,7 +207,7 @@ BASE_EXPORT void ConfigurePartitions(
AddDummyRefCount add_dummy_ref_count,
AlternateBucketDistribution use_alternate_bucket_distribution);
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
#endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@ -22,7 +22,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
@ -713,7 +712,7 @@ void ConfigurePartitions(
}
}
#if PA_CONFIG(ALLOW_PCSCAN)
#if BUILDFLAG(USE_STARSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&::base::PlatformThread::SetName);
@ -730,7 +729,7 @@ void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
base::internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
base::internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
}
#endif // PA_CONFIG(ALLOW_PCSCAN)
#endif // BUILDFLAG(USE_STARSCAN)
#if BUILDFLAG(IS_WIN)
// Call this as soon as possible during startup.

View File

@ -30,13 +30,14 @@ ThreadSafePartitionRoot& PCScanMetadataAllocator() {
return *allocator;
}
// TODO(tasak): investigate whether PartitionAlloc tests really need this
// function or not. If no tests need it, remove it.
void ReinitPCScanMetadataAllocatorForTesting() {
// First, purge memory owned by PCScanMetadataAllocator.
PCScanMetadataAllocator().PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages);
// Then, reinit the allocator.
PCScanMetadataAllocator().~PartitionRoot();
memset(&PCScanMetadataAllocator(), 0, sizeof(PCScanMetadataAllocator()));
PCScanMetadataAllocator().ResetForTesting(true); // IN-TEST
PCScanMetadataAllocator().Init(kConfig);
}

View File

@ -34,6 +34,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -613,14 +614,14 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
// First, check if |maybe_ptr| points to a valid super page or a quarantined
// card.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Check if |maybe_ptr| points to a quarantined card.
if (PA_LIKELY(
!QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
return nullptr;
}
#else
#else // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Without the card table, use the reservation offset table to check if
// |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
// we may hit the slow path more frequently), but reduces the memory
@ -634,11 +635,11 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
return nullptr;
}
#endif // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
#else // PA_CONFIG(HAS_64_BITS_POINTERS)
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
return nullptr;
}
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// We are certain here that |maybe_ptr| points to an allocated super-page.
return StateBitmapFromAddr(maybe_ptr);
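The card-table fast path above can be pictured with a toy model: one byte per fixed-size card, set when the card may hold quarantined objects, so most pointers are rejected with a single table load. A self-contained sketch (sizes are hypothetical, not PartitionAlloc's):

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

// Toy quarantine card table: one byte per 4 KiB "card". A zero byte lets the
// scanner reject |addr| immediately; a non-zero byte means "look closer".
class ToyCardTable {
 public:
  static constexpr uintptr_t kCardSize = 4096;
  static constexpr size_t kNumCards = size_t{1} << 16;  // covers 256 MiB

  void MarkQuarantined(uintptr_t addr) { cards_[Index(addr)] = 1; }
  bool MaybeQuarantined(uintptr_t addr) const {
    return cards_[Index(addr)] != 0;
  }

 private:
  static size_t Index(uintptr_t addr) {
    return (addr / kCardSize) % kNumCards;
  }
  std::array<uint8_t, kNumCards> cards_{};
};
```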
@ -777,14 +778,14 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
size_t quarantine_size() const { return quarantine_size_; }
private:
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
return PartitionAddressSpace::RegularPoolBase();
}
PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
return PartitionAddressSpace::RegularPoolBaseMask();
}
#endif // PA_CONFIG(HAS_64_BITS_POINTERS)
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
// |maybe_ptr| may have an MTE tag, so remove it first.
@ -1289,7 +1290,7 @@ PCScanInternal::~PCScanInternal() = default;
void PCScanInternal::Initialize(PCScan::InitConfig config) {
PA_DCHECK(!is_initialized_);
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// Make sure that pools are initialized.
PartitionAddressSpace::Init();
#endif

View File

@ -9,6 +9,7 @@
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
@ -93,12 +94,12 @@ template <typename Derived>
void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::RegularPoolMask() & kPtrUntagMask;
const uintptr_t base = Derived::RegularPoolBase();
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
for (; begin < end; begin += sizeof(uintptr_t)) {
// Read the region word-by-word. Everything that we read is a potential
// pointer to or inside an object on heap. Such an object should be
@ -106,13 +107,13 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
//
// Keep it MTE-untagged. See DisableMTEScope for details.
const uintptr_t maybe_ptr = *reinterpret_cast<uintptr_t*>(begin);
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
if (PA_LIKELY((maybe_ptr & mask) != base))
continue;
#else
if (!maybe_ptr)
continue;
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
derived().CheckPointer(maybe_ptr);
}
}
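The loop above amounts to one mask-and-compare per machine word: a word can only be a pointer into the regular pool if its high bits match the pool base. A standalone sketch of that filter (the pool base/mask values are made up):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy version of ScanLoop's unvectorized filter. Only words whose masked
// high bits equal the pool base survive to the (slower) CheckPointer stage.
size_t CountCandidatePointers(const std::vector<uintptr_t>& words) {
  constexpr uintptr_t kPoolBase = 0x4000'0000'0000;  // hypothetical
  constexpr uintptr_t kPoolMask = 0xFFFF'0000'0000;  // hypothetical
  size_t candidates = 0;
  for (uintptr_t word : words) {
    if ((word & kPoolMask) == kPoolBase)
      ++candidates;  // The real loop calls derived().CheckPointer(word).
  }
  return candidates;
}
```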

View File

@ -8,6 +8,7 @@
#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"
@ -135,12 +136,12 @@ namespace {
} // namespace
void Stack::IteratePointers(StackVisitor* visitor) const {
#if defined(PA_PCSCAN_STACK_SUPPORTED)
#if BUILDFLAG(PCSCAN_STACK_SUPPORTED)
PAPushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
IterateSafeStackIfNecessary(visitor);
#endif
#endif // BUILDFLAG(PCSCAN_STACK_SUPPORTED)
}
} // namespace partition_alloc::internal

View File

@ -355,7 +355,7 @@ void ThreadCache::RemoveTombstoneForTesting() {
// static
void ThreadCache::Init(PartitionRoot<>* root) {
#if BUILDFLAG(IS_NACL)
PA_IMMEDIATE_CRASH();
static_assert(false, "PartitionAlloc isn't supported for NaCl");
#endif
PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
ThreadCache::kLargeSizeThreshold);

View File

@ -27,7 +27,7 @@
#include "base/allocator/partition_allocator/partition_tls.h"
#include "build/build_config.h"
#if defined(ARCH_CPU_X86_64) && PA_CONFIG(HAS_64_BITS_POINTERS)
#if defined(ARCH_CPU_X86_64) && BUILDFLAG(HAS_64_BIT_POINTERS)
#include <algorithm>
#endif
@ -43,13 +43,13 @@ namespace tools {
//
// These two values were chosen randomly, and in particular neither is a valid
// pointer on most 64 bit architectures.
#if PA_CONFIG(HAS_64_BITS_POINTERS)
#if BUILDFLAG(HAS_64_BIT_POINTERS)
constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
#else
constexpr uintptr_t kNeedle1 = 0xe69e32f3;
constexpr uintptr_t kNeedle2 = 0x9615ee1c;
#endif
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
// This array contains, in order:
// - kNeedle1
@ -161,14 +161,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
internal::base::TimeDelta periodic_purge_next_interval_ =
kDefaultPurgeInterval;
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
uint8_t largest_active_bucket_index_ = 1;
#else
uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
ThreadCacheLimits::kDefaultSizeThreshold);
#endif
};
constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
@ -392,15 +386,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);
static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);
#if BUILDFLAG(IS_NACL)
// The thread cache is never used with NaCl, but its compiler doesn't
// understand enough constexpr to handle the code below.
static constexpr uint16_t kBucketCount = 1;
#else
static constexpr uint16_t kBucketCount =
internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
1;
#endif
static_assert(
kBucketCount < internal::kNumBuckets,
"Cannot have more cached buckets than what the allocator supports");
@ -547,7 +535,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
internal::PartitionFreelistEntry* entry = bucket.freelist_head;
// TODO(lizeb): Consider removing once crbug.com/1382658 is fixed.
#if BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) && \
PA_CONFIG(HAS_64_BITS_POINTERS)
BUILDFLAG(HAS_64_BIT_POINTERS)
// x86_64 architecture now supports 57 bits of address space, as of Ice Lake
// for Intel. However Chrome OS systems do not ship with kernel support for
// it, but with 48 bits, so all canonical addresses have the upper 16 bits
@ -555,7 +543,8 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// by the kernel).
constexpr uintptr_t kCanonicalPointerMask = (1ULL << 48) - 1;
PA_CHECK(!(reinterpret_cast<uintptr_t>(entry) & ~kCanonicalPointerMask));
#endif
#endif // BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_X86_64) &&
// BUILDFLAG(HAS_64_BIT_POINTERS)
// Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that led to the crash, helping to
@ -578,7 +567,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
uintptr_t slot_start) {
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
PA_CONFIG(HAS_64_BITS_POINTERS)
BUILDFLAG(HAS_64_BIT_POINTERS)
// We see freelist corruption crashes happening in the wild. These are likely
// due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
// somewhere in the code.
@ -630,7 +619,7 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
address_aligned += 4;
}
#endif // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
// PA_CONFIG(HAS_64_BITS_POINTERS)
// BUILDFLAG(HAS_64_BIT_POINTERS)
auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
slot_start, bucket.freelist_head);
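The canonical-pointer check above is a cheap corruption tripwire: with 48-bit virtual addressing, a legitimate user-space pointer has its upper 16 bits clear. A standalone sketch (the sample pointer values are made up):

```cpp
#include <cassert>
#include <cstdint>

// 48-bit canonical user-space addresses have the top 16 bits zeroed, so any
// freelist entry with those bits set must be corrupt on such systems.
bool IsCanonical48BitUserPointer(uint64_t ptr) {
  constexpr uint64_t kCanonicalPointerMask = (uint64_t{1} << 48) - 1;
  return (ptr & ~kCanonicalPointerMask) == 0;
}

void CanonicalPointerExample() {
  assert(IsCanonical48BitUserPointer(0x00007f0012345678));   // plausible heap
  assert(!IsCanonical48BitUserPointer(0xdead00007f001234));  // corrupted
}
```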

View File

@ -14,13 +14,12 @@
// other hyper-thread on this core. See the following for context:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
#if BUILDFLAG(IS_NACL)
// Inline assembly not allowed.
#define PA_YIELD_PROCESSOR ((void)0)
#elif PA_CONFIG(IS_NONCLANG_MSVC)
#if PA_CONFIG(IS_NONCLANG_MSVC)
// MSVC is in its own assemblyless world (crbug.com/1351310#c6).
#include <windows.h>
#define PA_YIELD_PROCESSOR (YieldProcessor())
#else
#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
@ -47,6 +46,6 @@
#define PA_YIELD_PROCESSOR ((void)0)
#endif
#endif // BUILDFLAG(IS_NACL)
#endif // PA_CONFIG(IS_NONCLANG_MSVC)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_YIELD_PROCESSOR_H_
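For context, PA_YIELD_PROCESSOR is meant for spin loops like the sketch below, where pausing between polls frees execution resources for the sibling hyper-thread (illustrative only; the real users are PartitionAlloc's spinlocks):

```cpp
#include <atomic>

// Illustrative spin-wait. PA_YIELD_PROCESSOR expands to pause/yield (or a
// no-op) per the platform #ifs above, reducing power and contention while
// we poll.
void SpinUntilSet(std::atomic<bool>& flag) {
  while (!flag.load(std::memory_order_acquire)) {
    PA_YIELD_PROCESSOR;
  }
}
```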

View File

@ -48,12 +48,17 @@ source_set("jni_sample_native_side") {
]
}
generate_jni_registration("jni_registration") {
targets = [ ":jni_sample_java" ]
manual_jni_registration = true
}
shared_library("jni_sample_lib") {
sources = [ "sample_entry_point.cc" ]
deps = [
":jni_registration",
":jni_sample_native_side",
":sample_jni_apk__final_jni", # For registration_header
"//base",
]
}
@ -63,7 +68,6 @@ android_apk("sample_jni_apk") {
android_manifest = "AndroidManifest.xml"
deps = [ ":jni_sample_java" ]
shared_libraries = [ ":jni_sample_lib" ]
manual_jni_registration = true
}
# Serves to test that generated bindings compile properly.

View File

@ -175,6 +175,16 @@ public class AnimationFrameTimeHistogramTest {
If a native method is called without setting a mock in a unit test, an
`UnsupportedOperationException` will be thrown.
#### Special case: DFMs
DFMs have their own generated `GEN_JNI`s, which are `<module_name>_GEN_JNI`. In
order to get your DFM's JNI to use the `<module_name>` prefix, you must add your
module name into the argument of the `@NativeMethods` annotation.
So, for example, say your module was named `test_module`. You would annotate
your `Natives` interface with `@NativeMethods("test_module")`, and this would
result in `test_module_GEN_JNI`.
### Testing for readiness: use `get()`
JNI Generator automatically produces checks that verify that the Natives interface can be safely

View File

@ -1,13 +1,10 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests."""
from __future__ import print_function
import argparse
import base64
import collections
@ -62,7 +59,7 @@ _EXTRACT_METHODS_REGEX = re.compile(
flags=re.DOTALL)
_NATIVE_PROXY_EXTRACTION_REGEX = re.compile(
r'@NativeMethods[\S\s]+?interface\s*'
r'@NativeMethods(?:\(\s*"(?P<module_name>\w+)"\s*\))?[\S\s]+?interface\s*'
r'(?P<interface_name>\w*)\s*(?P<interface_body>{(\s*.*)+?\s*})')
# Use 100 columns rather than 80 because it makes many lines more readable.
@ -842,9 +839,12 @@ class JNIFromJavaP(object):
self.constant_fields.append(
ConstantField(name=match.group('name'), value=value.group('value')))
# We pass in an empty string for the module (which will make the JNI use the
# base module's files) for all javap-derived JNI. There may be a way to get
# the module from a jar file, but it's not needed right now.
self.inl_header_file_generator = InlHeaderFileGenerator(
self.namespace, self.fully_qualified_class, [], self.called_by_natives,
self.constant_fields, self.jni_params, options)
'', self.namespace, self.fully_qualified_class, [],
self.called_by_natives, self.constant_fields, self.jni_params, options)
def GetContent(self):
return self.inl_header_file_generator.GetContent()
@ -875,17 +875,21 @@ class ProxyHelpers(object):
MAX_CHARS_FOR_HASHED_NATIVE_METHODS = 8
@staticmethod
def GetClass(use_hash):
return 'N' if use_hash else 'GEN_JNI'
def GetClass(short_name, name_prefix=None):
if not name_prefix:
name_prefix = ''
else:
name_prefix += '_'
return name_prefix + ('N' if short_name else 'GEN_JNI')
@staticmethod
def GetPackage(use_hash):
return 'J' if use_hash else 'org/chromium/base/natives'
def GetPackage(short_name):
return 'J' if short_name else 'org/chromium/base/natives'
@staticmethod
def GetQualifiedClass(use_hash):
return '%s/%s' % (ProxyHelpers.GetPackage(use_hash),
ProxyHelpers.GetClass(use_hash))
def GetQualifiedClass(short_name, name_prefix=None):
return '%s/%s' % (ProxyHelpers.GetPackage(short_name),
ProxyHelpers.GetClass(short_name, name_prefix))
@staticmethod
def CreateHashedMethodName(fully_qualified_class_name, method_name):
@ -934,8 +938,18 @@ class ProxyHelpers(object):
ptr_type,
include_test_only=True):
methods = []
first_match = True
module_name = None
for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents):
interface_body = match.group('interface_body')
if first_match:
module_name = match.group('module_name')
first_match = False
else:
assert module_name == match.group(
'module_name'
), 'JNI cannot belong to two modules in one file: {} and {}'.format(
module_name, match.group('module_name'))
for method in _EXTRACT_METHODS_REGEX.finditer(interface_body):
name = method.group('name')
if not include_test_only and _NameIsTestOnly(name):
@ -961,7 +975,9 @@ class ProxyHelpers(object):
ptr_type=ptr_type)
methods.append(native)
return methods
if not module_name:
module_name = ''
return methods, module_name
class JNIFromJavaSource(object):
@ -972,20 +988,19 @@ class JNIFromJavaSource(object):
self.jni_params = JniParams(fully_qualified_class)
self.jni_params.ExtractImportsAndInnerClasses(contents)
jni_namespace = ExtractJNINamespace(contents) or options.namespace
natives = ExtractNatives(contents, options.ptr_type)
called_by_natives = ExtractCalledByNatives(self.jni_params, contents,
options.always_mangle)
natives += ProxyHelpers.ExtractStaticProxyNatives(fully_qualified_class,
contents,
options.ptr_type)
natives, module_name = ProxyHelpers.ExtractStaticProxyNatives(
fully_qualified_class, contents, options.ptr_type)
natives += ExtractNatives(contents, options.ptr_type)
if len(natives) == 0 and len(called_by_natives) == 0:
raise SyntaxError(
'Unable to find any JNI methods for %s.' % fully_qualified_class)
inl_header_file_generator = InlHeaderFileGenerator(
jni_namespace, fully_qualified_class, natives, called_by_natives, [],
self.jni_params, options)
module_name, jni_namespace, fully_qualified_class, natives,
called_by_natives, [], self.jni_params, options)
self.content = inl_header_file_generator.GetContent()
def GetContent(self):
@ -1005,11 +1020,13 @@ class HeaderFileGeneratorHelper(object):
def __init__(self,
class_name,
module_name,
fully_qualified_class,
use_proxy_hash,
split_name=None,
enable_jni_multiplexing=False):
self.class_name = class_name
self.module_name = module_name
self.fully_qualified_class = fully_qualified_class
self.use_proxy_hash = use_proxy_hash
self.split_name = split_name
@ -1031,8 +1048,8 @@ class HeaderFileGeneratorHelper(object):
method_name = EscapeClassName(native.proxy_name)
return 'Java_%s_%s' % (EscapeClassName(
ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash
or self.enable_jni_multiplexing)), method_name)
self.use_proxy_hash or self.enable_jni_multiplexing,
self.module_name)), method_name)
template = Template('Java_${JAVA_NAME}_native${NAME}')
@ -1047,9 +1064,9 @@ class HeaderFileGeneratorHelper(object):
ret = collections.OrderedDict()
for entry in origin:
if isinstance(entry, NativeMethod) and entry.is_proxy:
use_hash = self.use_proxy_hash or self.enable_jni_multiplexing
ret[ProxyHelpers.GetClass(use_hash)] \
= ProxyHelpers.GetQualifiedClass(use_hash)
short_name = self.use_proxy_hash or self.enable_jni_multiplexing
ret[ProxyHelpers.GetClass(short_name, self.module_name)] \
= ProxyHelpers.GetQualifiedClass(short_name, self.module_name)
continue
ret[self.class_name] = self.fully_qualified_class
@ -1083,7 +1100,8 @@ const char kClassPath_${JAVA_CLASS}[] = \
# Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing):
self.use_proxy_hash or self.enable_jni_multiplexing,
self.module_name):
ret += [template.substitute(values)]
class_getter = """\
@ -1115,7 +1133,8 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
# Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing):
self.use_proxy_hash or self.enable_jni_multiplexing,
self.module_name):
ret += [template.substitute(values)]
return ''.join(ret)
@ -1124,7 +1143,7 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
class InlHeaderFileGenerator(object):
"""Generates an inline header file for JNI integration."""
def __init__(self, namespace, fully_qualified_class, natives,
def __init__(self, module_name, namespace, fully_qualified_class, natives,
called_by_natives, constant_fields, jni_params, options):
self.namespace = namespace
self.fully_qualified_class = fully_qualified_class
@ -1137,6 +1156,7 @@ class InlHeaderFileGenerator(object):
self.options = options
self.helper = HeaderFileGeneratorHelper(
self.class_name,
module_name,
fully_qualified_class,
self.options.use_proxy_hash,
split_name=self.options.split_name,

View File

@ -12,14 +12,17 @@ file.
"""
import collections
import copy
import difflib
import inspect
import optparse
import os
import sys
import tempfile
import unittest
import jni_generator
import jni_registration_generator
import zipfile
from jni_generator import CalledByNative
from jni_generator import IsMainDexJavaClass
from jni_generator import NativeMethod
@ -44,7 +47,7 @@ def _RemoveHashedNames(natives):
return ret
class TestOptions(object):
class JniGeneratorOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
@ -54,7 +57,6 @@ class TestOptions(object):
self.ptr_type = 'long'
self.cpp = 'cpp'
self.javap = 'mock-javap'
self.native_exports_optional = True
self.enable_profiling = False
self.enable_tracing = False
self.use_proxy_hash = False
@ -65,6 +67,21 @@ class TestOptions(object):
self.include_test_only = True
class JniRegistrationGeneratorOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
self.sources_exclusions = []
self.namespace = None
self.enable_proxy_mocks = False
self.require_mocks = False
self.use_proxy_hash = False
self.enable_jni_multiplexing = False
self.manual_jni_registration = False
self.include_test_only = False
self.header_path = None
class BaseTest(unittest.TestCase):
@staticmethod
@ -100,10 +117,42 @@ class BaseTest(unittest.TestCase):
signature_to_cases[signature].extend(cases)
combined_dict[
'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls(
signature_to_cases, namespace)
signature_to_cases, '')
return combined_dict
def _TestEndToEndRegistration(self,
input_java_src_files,
options,
name_to_goldens,
header_golden=None):
with tempfile.TemporaryDirectory() as tdir:
options.srcjar_path = os.path.join(tdir, 'srcjar.jar')
if header_golden:
options.header_path = os.path.join(tdir, 'header.h')
input_java_paths = [
self._JoinScriptDir(os.path.join(_JAVA_SRC_DIR, f))
for f in input_java_src_files
]
jni_registration_generator._Generate(options, input_java_paths)
with zipfile.ZipFile(options.srcjar_path, 'r') as srcjar:
for name in srcjar.namelist():
self.assertTrue(
name in name_to_goldens,
f'Found {name} output, but not present in name_to_goldens map.')
contents = srcjar.read(name).decode('utf-8')
self.AssertGoldenTextEquals(contents,
golden_file=name_to_goldens[name])
if header_golden:
with open(options.header_path, 'r') as f:
# Temp directory will cause some diffs each time we run if we don't
# normalize.
contents = f.read().replace(
tdir.replace('/', '_').upper(), 'TEMP_DIR')
self.AssertGoldenTextEquals(contents, golden_file=header_golden)
def _JoinScriptDir(self, path):
script_dir = os.path.dirname(sys.argv[0])
return os.path.join(script_dir, path)
@ -123,7 +172,7 @@ class BaseTest(unittest.TestCase):
content = f.read()
opts = options
if opts is None:
opts = TestOptions()
opts = JniGeneratorOptions()
jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz,
opts)
@ -192,8 +241,8 @@ class BaseTest(unittest.TestCase):
if golden_file is None:
self.assertTrue(
caller.startswith('test'),
'AssertGoldenTextEquals can only be called from a '
'test* method, not %s' % caller)
'AssertGoldenTextEquals can only be called without a golden file '
'from a test* method, not %s' % caller)
golden_file = '%s%s.golden' % (caller, suffix)
golden_text = self._ReadGoldenFile(golden_file)
if os.environ.get(_REBASELINE_ENV):
@ -209,6 +258,7 @@ class BaseTest(unittest.TestCase):
self.AssertTextEquals(golden_text, generated_text)
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class TestGenerator(BaseTest):
def testInspectCaller(self):
@ -375,21 +425,21 @@ class TestGenerator(BaseTest):
java_class_name=None)
]
self.AssertListEquals(golden_natives, natives)
h1 = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
h1 = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator('',
'',
h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(),
'', '',
'org/chromium/TestJni',
natives,
jni_params,
True,
use_proxy_hash=False)
natives, jni_params,
True)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True),
reg_options, '', content),
suffix='Registrations')
def testInnerClassNatives(self):
@ -410,9 +460,9 @@ class TestGenerator(BaseTest):
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesMultiple(self):
@ -443,9 +493,9 @@ class TestGenerator(BaseTest):
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesBothInnerAndOuter(self):
@ -475,22 +525,22 @@ class TestGenerator(BaseTest):
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent())
h2 = jni_registration_generator.HeaderGenerator('',
'',
h2 = jni_registration_generator.DictionaryGenerator(JniGeneratorOptions(),
'', '',
'org/chromium/TestJni',
natives,
jni_params,
True,
use_proxy_hash=False)
natives, jni_params,
True)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True),
reg_options, '', content),
suffix='Registrations')
def testCalledByNatives(self):
@ -839,9 +889,9 @@ class TestGenerator(BaseTest):
),
]
self.AssertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [],
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni', [],
called_by_natives, [], jni_params,
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testCalledByNativeParseError(self):
@ -938,8 +988,8 @@ public abstract class java.util.HashSet<T> extends java.util.AbstractSet<E>
Signature: ([Landroid/icu/text/DisplayContext;)V
}
"""
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testSnippnetJavap6_7_8(self):
@ -964,12 +1014,12 @@ public class java.util.HashSet {
}
"""
jni_from_javap6 = jni_generator.JNIFromJavaP(
content_javap6.split('\n'), TestOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP(
content_javap7.split('\n'), TestOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP(
content_javap8.split('\n'), TestOptions())
jni_from_javap6 = jni_generator.JNIFromJavaP(content_javap6.split('\n'),
JniGeneratorOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP(content_javap7.split('\n'),
JniGeneratorOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP(content_javap8.split('\n'),
JniGeneratorOptions())
self.assertTrue(jni_from_javap6.GetContent())
self.assertTrue(jni_from_javap7.GetContent())
self.assertTrue(jni_from_javap8.GetContent())
@ -983,16 +1033,16 @@ public class java.util.HashSet {
def testFromJavaP(self):
contents = self._ReadGoldenFile('testInputStream.javap')
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
JniGeneratorOptions())
self.assertEqual(10, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testConstantsFromJavaP(self):
for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']:
contents = self._ReadGoldenFile(f)
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'),
JniGeneratorOptions())
self.assertEqual(86, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
@ -1013,8 +1063,8 @@ public class java.util.HashSet {
private native void nativeSyncSetupEnded(
int nativeAndroidSyncSetupFlowHandler);
"""
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'foo/bar', TestOptions())
jni_from_java = jni_generator.JNIFromJavaSource(test_data, 'foo/bar',
JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testRaisesOnNonJNIMethod(self):
@ -1025,7 +1075,7 @@ public class java.util.HashSet {
}
"""
self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data,
'foo/bar', TestOptions())
'foo/bar', JniGeneratorOptions())
def testJniSelfDocumentingExample(self):
generated_text = self._CreateJniHeaderFromFile(
@ -1045,7 +1095,7 @@ public class java.util.HashSet {
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, ('com/google/lookhowextremelylongiam/snarf/'
'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'),
TestOptions())
JniGeneratorOptions())
jni_lines = jni_from_java.GetContent().split('\n')
line = next(
line for line in jni_lines if line.lstrip().startswith('#ifndef'))
@ -1113,7 +1163,7 @@ class Foo {
jni_params.JavaToJni('java/nio/ByteBuffer[]'))
def testNativesLong(self):
test_options = TestOptions()
test_options = JniGeneratorOptions()
test_options.ptr_type = 'long'
test_data = """"
private native void nativeDestroy(long nativeChromeBrowserProvider);
@ -1131,8 +1181,9 @@ class Foo {
ptr_type=test_options.ptr_type),
]
self.AssertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator(
'', 'org/chromium/TestJni', natives, [], [], jni_params, test_options)
h = jni_generator.InlHeaderFileGenerator('', '', 'org/chromium/TestJni',
natives, [], [], jni_params,
test_options)
self.AssertGoldenTextEquals(h.GetContent())
def testMainDexAnnotation(self):
@ -1210,8 +1261,7 @@ class Foo {
}
}
"""
options = TestOptions()
options.native_exports_optional = False
options = JniGeneratorOptions()
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/example/jni_generator/SampleForTests', options)
self.AssertGoldenTextEquals(jni_from_java.GetContent())
@ -1229,7 +1279,7 @@ class Foo {
def willRaise():
jni_generator.JNIFromJavaSource(test_data,
'org/chromium/media/VideoCaptureFactory',
TestOptions())
JniGeneratorOptions())
self.assertRaises(SyntaxError, willRaise)
@ -1249,7 +1299,7 @@ class Foo {
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testMultipleJNIAdditionalImport(self):
@ -1270,7 +1320,7 @@ class Foo {
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testTracing(self):
@ -1291,7 +1341,7 @@ class Foo {
static native void nativeStaticMethod();
}
"""
options_with_tracing = TestOptions()
options_with_tracing = JniGeneratorOptions()
options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1314,11 +1364,11 @@ class Foo {
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
JniGeneratorOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testSplitNameExample(self):
opts = TestOptions()
opts = JniGeneratorOptions()
opts.split_name = "sample"
generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'),
@ -1327,19 +1377,58 @@ class Foo {
generated_text, golden_file='SampleForTestsWithSplit_jni.golden')
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class ProxyTestGenerator(BaseTest):
def _BuildRegDictFromSample(self, options=None):
if options is None:
options = TestOptions()
def _BuildRegDictFromSample(self):
path = self._JoinScriptDir(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'))
reg_dict = jni_registration_generator._DictForPath(path)
reg_dict = jni_registration_generator._DictForPath(
JniRegistrationGeneratorOptions(), path)
reg_dict = self._MergeRegistrationForTests([reg_dict])
return reg_dict
def testEndToEndProxyHashed(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testEndToEndManualRegistration(self):
input_java_files = ['SampleForAnnotationProcessor.java']
options = JniRegistrationGeneratorOptions()
options.manual_jni_registration = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'SampleForAnnotationProcessorGenJni.golden'
}
self._TestEndToEndRegistration(
input_java_files,
options,
name_to_goldens,
header_golden='SampleForAnnotationProcessorManualJni.golden')
def testEndToEndProxyJniWithModules(self):
input_java_files = [
'SampleForAnnotationProcessor.java', 'SampleModule.java'
]
options = JniRegistrationGeneratorOptions()
options.use_proxy_hash = True
name_to_goldens = {
'org/chromium/base/natives/GEN_JNI.java':
'HashedSampleForAnnotationProcessorGenJni.2.golden',
'J/N.java': 'HashedSampleForAnnotationProcessorGenJni.golden',
'org/chromium/base/natives/module_GEN_JNI.java': 'ModuleGenJni.golden',
'J/module_N.java': 'ModuleJN.golden'
}
self._TestEndToEndRegistration(input_java_files, options, name_to_goldens)
def testProxyNativesWithNatives(self):
test_data = """
package org.chromium.foo;
@ -1362,7 +1451,7 @@ class ProxyTestGenerator(BaseTest):
}
"""
options_with_tracing = TestOptions()
options_with_tracing = JniGeneratorOptions()
options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing)
@ -1380,7 +1469,7 @@ class ProxyTestGenerator(BaseTest):
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
golden_natives = [
@ -1416,7 +1505,7 @@ class ProxyTestGenerator(BaseTest):
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', True)
golden_natives = [
@ -1452,7 +1541,7 @@ class ProxyTestGenerator(BaseTest):
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', False)
self.AssertListEquals(_RemoveHashedNames(natives), [])
@ -1481,9 +1570,10 @@ class ProxyTestGenerator(BaseTest):
}
"""
qualified_clazz = 'test/foo/Foo'
jni_params = TestOptions()
options = JniRegistrationGeneratorOptions()
options.manual_jni_registration = True
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
golden_natives = [
@ -1500,42 +1590,33 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(_RemoveHashedNames(natives), golden_natives)
jni_params = jni_generator.JniParams(qualified_clazz)
main_dex_header = jni_registration_generator.HeaderGenerator(
'',
'',
qualified_clazz,
natives,
jni_params,
main_dex=True,
use_proxy_hash=False).Generate()
main_dex_header = jni_registration_generator.DictionaryGenerator(
options, '', '', qualified_clazz, natives, jni_params,
main_dex=True).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True))
jni_registration_generator.CreateFromDict(options, '', content))
other_qualified_clazz = 'test/foo/Bar'
other_natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
other_natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
other_qualified_clazz, non_main_dex_test_data, 'long')
jni_params = jni_generator.JniParams(other_qualified_clazz)
non_main_dex_header = jni_registration_generator.HeaderGenerator(
non_main_dex_header = jni_registration_generator.DictionaryGenerator(
options,
'',
'',
other_qualified_clazz,
other_natives,
jni_params,
main_dex=False,
use_proxy_hash=False).Generate()
main_dex=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header] +
[non_main_dex_header])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True),
jni_registration_generator.CreateFromDict(options, '', content),
'AndNonMainDex')
def testProxyNatives(self):
@ -1575,9 +1656,9 @@ class ProxyTestGenerator(BaseTest):
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, _ = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
bad_spacing_natives = jni_generator.ProxyHelpers \
bad_spacing_natives, _ = jni_generator.ProxyHelpers \
.ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long')
golden_natives = [
NativeMethod(
@ -1616,34 +1697,32 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
self.AssertListEquals(golden_natives,
_RemoveHashedNames(bad_spacing_natives))
options = JniGeneratorOptions()
reg_options = JniRegistrationGeneratorOptions()
reg_options.manual_jni_registration = True
jni_params = jni_generator.JniParams(qualified_clazz)
h1 = jni_generator.InlHeaderFileGenerator('', qualified_clazz, natives, [],
[], jni_params, TestOptions())
h1 = jni_generator.InlHeaderFileGenerator('', '', qualified_clazz, natives,
[], [], jni_params, options)
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator('',
'',
h2 = jni_registration_generator.DictionaryGenerator(reg_options, '', '',
qualified_clazz,
natives,
jni_params,
False,
use_proxy_hash=False)
natives, jni_params,
False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
proxy_opts = jni_registration_generator.ProxyOptions(
manual_jni_registration=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(content, proxy_opts),
jni_registration_generator.CreateProxyJavaFromDict(
reg_options, '', content),
suffix='Java')
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content,
proxy_opts.use_hash,
manual_jni_registration=proxy_opts.manual_jni_registration),
reg_options, '', content),
suffix='Registrations')
def testProxyHashedExample(self):
opts = TestOptions()
opts = JniGeneratorOptions()
opts.use_proxy_hash = True
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
@ -1654,20 +1733,6 @@ class ProxyTestGenerator(BaseTest):
generated_text,
golden_file='HashedSampleForAnnotationProcessor_jni.golden')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path), use_proxy_hash=True)
reg_dict = self._MergeRegistrationForTests([reg_dict])
proxy_opts = jni_registration_generator.ProxyOptions(use_hash=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
golden_file='HashedSampleForAnnotationProcessorGenJni.golden')
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts, forwarding=True),
golden_file='HashedSampleForAnnotationProcessorGenJni.2.golden')
def testProxyJniExample(self):
generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'),
@ -1676,21 +1741,20 @@ class ProxyTestGenerator(BaseTest):
generated_text, golden_file='SampleForAnnotationProcessor_jni.golden')
def testGenJniFlags(self):
options = JniRegistrationGeneratorOptions()
reg_dict = self._BuildRegDictFromSample()
proxy_options = jni_registration_generator.ProxyOptions()
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'Disabled')
proxy_options = jni_registration_generator.ProxyOptions(enable_mocks=True)
options.enable_proxy_mocks = True
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'MocksEnabled')
proxy_options = jni_registration_generator.ProxyOptions(
enable_mocks=True, require_mocks=True)
options.require_mocks = True
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
options, '', reg_dict)
self.AssertGoldenTextEquals(content, 'MocksRequired')
def testProxyTypeInfoPreserved(self):
@ -1708,8 +1772,8 @@ class ProxyTestGenerator(BaseTest):
SomeJavaType[][] someObjects);
}
"""
natives = ProxyHelpers.ExtractStaticProxyNatives('org/chromium/foo/FooJni',
test_data, 'long')
natives, _ = ProxyHelpers.ExtractStaticProxyNatives(
'org/chromium/foo/FooJni', test_data, 'long')
golden_natives = [
NativeMethod(
static=True,
@ -1758,63 +1822,53 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))
@unittest.skipIf(os.name == 'nt', 'Not intended to work on Windows')
class MultiplexTestGenerator(BaseTest):
options = JniRegistrationGeneratorOptions()
options.enable_jni_multiplexing = True
def testProxyMultiplexGenJni(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
self.options, self._JoinScriptDir(path))
reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
self.options, '', reg_dict),
golden_file='testProxyMultiplexGenJni.golden')
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(reg_dict,
proxy_opts,
jni_registration_generator.CreateProxyJavaFromDict(self.options,
'',
reg_dict,
forwarding=True),
golden_file='testProxyMultiplexGenJni.2.golden')
def testProxyMultiplexNatives(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
self.options, self._JoinScriptDir(path))
reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_dict,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing),
self.options, '', reg_dict),
golden_file='testProxyMultiplexNatives.golden')
def testProxyMultiplexNativesRegistration(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict_for_registration = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
self.options, self._JoinScriptDir(path))
reg_dict_for_registration = self._MergeRegistrationForTests(
[reg_dict_for_registration], enable_jni_multiplexing=True)
proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
new_options = copy.copy(self.options)
new_options.manual_jni_registration = True
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(
reg_dict_for_registration,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=True),
jni_registration_generator.CreateFromDict(new_options, '',
reg_dict_for_registration),
golden_file='testProxyMultiplexNativesRegistration.golden')

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@ -37,12 +37,7 @@ MERGEABLE_KEYS = [
]
def _Generate(java_file_paths,
srcjar_path,
proxy_opts,
header_path=None,
namespace='',
include_test_only=True):
def _Generate(options, java_file_paths):
"""Generates files required to perform JNI registration.
Generates a srcjar containing a single class, GEN_JNI, that contains all
@ -53,92 +48,92 @@ def _Generate(java_file_paths,
JNI registration.
Args:
options: arguments from the command line
java_file_paths: A list of java file paths.
srcjar_path: Path to the GEN_JNI srcjar.
header_path: If specified, generates a header file in this location.
namespace: If specified, sets the namespace for the generated header file.
"""
# Without multiprocessing, script takes ~13 seconds for chrome_public_apk
# on a z620. With multiprocessing, takes ~2 seconds.
results = []
results = collections.defaultdict(list)
with multiprocessing.Pool() as pool:
for d in pool.imap_unordered(
functools.partial(
_DictForPath,
use_proxy_hash=proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
namespace=namespace,
include_test_only=include_test_only), java_file_paths):
for d in pool.imap_unordered(functools.partial(_DictForPath, options),
java_file_paths):
if d:
results.append(d)
results[d['MODULE_NAME']].append(d)
combined_dicts = collections.defaultdict(dict)
for module_name, module_results in results.items():
# Sort to make output deterministic.
results.sort(key=lambda d: d['FULL_CLASS_NAME'])
combined_dict = {}
module_results.sort(key=lambda d: d['FULL_CLASS_NAME'])
combined_dict = combined_dicts[module_name]
for key in MERGEABLE_KEYS:
combined_dict[key] = ''.join(d.get(key, '') for d in results)
combined_dict[key] = ''.join(d.get(key, '') for d in module_results)
# PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have
# duplicates for JNI multiplexing since all native methods with similar
# signatures map to the same proxy. Similarly, there may be multiple switch
# case entries for the same proxy signatures.
if proxy_opts.enable_jni_multiplexing:
if options.enable_jni_multiplexing:
proxy_signatures_list = sorted(
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
signature for signature in proxy_signatures_list)
proxy_native_array_list = sorted(
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split('},\n')))
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split(
'},\n')))
combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join(
p for p in proxy_native_array_list if p != '') + '}'
signature_to_cases = collections.defaultdict(list)
for d in results:
for d in module_results:
for signature, cases in d['SIGNATURE_TO_CASES'].items():
signature_to_cases[signature].extend(cases)
combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls(
signature_to_cases, namespace)
signature_to_cases, module_name)
if options.header_path:
assert len(
combined_dicts) == 1, 'Cannot output a header for multiple modules'
module_name = next(iter(combined_dicts))
combined_dict = combined_dicts[module_name]
if header_path:
combined_dict['HEADER_GUARD'] = \
os.path.splitext(header_path)[0].replace('/', '_').upper() + '_'
combined_dict['NAMESPACE'] = namespace
header_content = CreateFromDict(
combined_dict,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=proxy_opts.manual_jni_registration)
with build_utils.AtomicOutput(header_path, mode='w') as f:
os.path.splitext(options.header_path)[0].replace('/', '_').replace('.', '_').upper() + '_'
combined_dict['NAMESPACE'] = options.namespace
header_content = CreateFromDict(options, module_name, combined_dict)
with build_utils.AtomicOutput(options.header_path, mode='w') as f:
f.write(header_content)
with build_utils.AtomicOutput(srcjar_path) as f:
with build_utils.AtomicOutput(options.srcjar_path) as f:
with zipfile.ZipFile(f, 'w') as srcjar:
if proxy_opts.use_hash or proxy_opts.enable_jni_multiplexing:
for module_name, combined_dict in combined_dicts.items():
if options.use_proxy_hash or options.enable_jni_multiplexing:
# J/N.java
build_utils.AddToZipHermetic(
srcjar,
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(True),
data=CreateProxyJavaFromDict(combined_dict, proxy_opts))
'%s.java' %
jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name),
data=CreateProxyJavaFromDict(options, module_name, combined_dict))
# org/chromium/base/natives/GEN_JNI.java
build_utils.AddToZipHermetic(
srcjar,
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False),
data=CreateProxyJavaFromDict(
combined_dict, proxy_opts, forwarding=True))
'%s.java' %
jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name),
data=CreateProxyJavaFromDict(options,
module_name,
combined_dict,
forwarding=True))
else:
# org/chromium/base/natives/GEN_JNI.java
build_utils.AddToZipHermetic(
srcjar,
'%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(False),
data=CreateProxyJavaFromDict(combined_dict, proxy_opts))
'%s.java' %
jni_generator.ProxyHelpers.GetQualifiedClass(False, module_name),
data=CreateProxyJavaFromDict(options, module_name, combined_dict))
def _DictForPath(path,
use_proxy_hash=False,
enable_jni_multiplexing=False,
namespace='',
include_test_only=True):
def _DictForPath(options, path):
with open(path) as f:
contents = jni_generator.RemoveComments(f.read())
if '@JniIgnoreNatives' in contents:
@ -146,13 +141,14 @@ def _DictForPath(path,
fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName(
path, contents)
natives = jni_generator.ExtractNatives(contents, 'long')
natives += jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
natives, module_name = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
fully_qualified_class=fully_qualified_class,
contents=contents,
ptr_type='long',
include_test_only=include_test_only)
include_test_only=options.include_test_only)
natives += jni_generator.ExtractNatives(contents, 'long')
if len(natives) == 0:
return None
# The namespace for the content is separate from the namespace for the
@ -161,19 +157,13 @@ def _DictForPath(path,
jni_params = jni_generator.JniParams(fully_qualified_class)
jni_params.ExtractImportsAndInnerClasses(contents)
is_main_dex = jni_generator.IsMainDexJavaClass(contents)
header_generator = HeaderGenerator(
namespace,
content_namespace,
fully_qualified_class,
natives,
jni_params,
is_main_dex,
use_proxy_hash,
enable_jni_multiplexing=enable_jni_multiplexing)
return header_generator.Generate()
dict_generator = DictionaryGenerator(options, module_name, content_namespace,
fully_qualified_class, natives,
jni_params, is_main_dex)
return dict_generator.Generate()
def _AddForwardingCalls(signature_to_cases, namespace):
def _AddForwardingCalls(signature_to_cases, module_name):
template = string.Template("""
JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}(
JNIEnv* env,
@ -199,7 +189,8 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
jni_generator.JavaDataTypeToC(return_type),
'CLASS_NAME':
jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(True) + namespace),
jni_generator.ProxyHelpers.GetQualifiedClass(True,
module_name)),
'PROXY_SIGNATURE':
jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list)),
@ -214,9 +205,7 @@ ${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
return ''.join(s for s in switch_statements)
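Each entry in the returned string is one C++ forwarder that dispatches on switch_num. A hand-expanded sketch of what the template above produces for a single multiplexed signature (class name, stub name, and switch value invented for illustration; the real ones come from the ${...} substitutions):
JNI_GENERATOR_EXPORT jint Java_J_N_resolve_1for_1int(
    JNIEnv* env,
    jclass jcaller,
    jlong switch_num,
    jlong p0) {
  switch (switch_num) {
    case 7:
      // Forwards to the generated stub rather than the impl, since the impl
      // is not guaranteed to have a globally unique name.
      return Java_org_chromium_foo_FooJni_getFoo(env, jcaller, p0);
    default:
      CHECK(false) << "resolve_for_int was called with an invalid "
                      "switch number: "
                   << switch_num;
      return 0;
  }
}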
def _SetProxyRegistrationFields(registration_dict, use_hash,
enable_jni_multiplexing,
manual_jni_registration):
def _SetProxyRegistrationFields(options, module_name, registration_dict):
registration_template = string.Template("""\
static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = {
@ -279,20 +268,20 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
} // namespace ${NAMESPACE}
""")
short_name = options.use_proxy_hash or options.enable_jni_multiplexing
sub_dict = {
'ESCAPED_PROXY_CLASS':
jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(
use_hash or enable_jni_multiplexing)),
jni_generator.ProxyHelpers.GetQualifiedClass(short_name,
module_name)),
'PROXY_CLASS':
jni_generator.ProxyHelpers.GetQualifiedClass(use_hash
or enable_jni_multiplexing),
jni_generator.ProxyHelpers.GetQualifiedClass(short_name, module_name),
'KMETHODS':
registration_dict['PROXY_NATIVE_METHOD_ARRAY'],
'REGISTRATION_NAME':
jni_generator.GetRegistrationFunctionName(
jni_generator.ProxyHelpers.GetQualifiedClass(
use_hash or enable_jni_multiplexing)),
jni_generator.ProxyHelpers.GetQualifiedClass(short_name,
module_name)),
}
if registration_dict['PROXY_NATIVE_METHOD_ARRAY']:
@ -316,14 +305,17 @@ ${REGISTER_NON_MAIN_DEX_NATIVES}
registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration
registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call
if manual_jni_registration:
if options.manual_jni_registration:
registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute(
registration_dict)
else:
registration_dict['MANUAL_REGISTRATION'] = ''
def CreateProxyJavaFromDict(registration_dict, proxy_opts, forwarding=False):
def CreateProxyJavaFromDict(options,
module_name,
registration_dict,
forwarding=False):
template = string.Template("""\
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
@ -341,19 +333,20 @@ ${METHODS}
}
""")
is_natives_class = not forwarding and (proxy_opts.use_hash
or proxy_opts.enable_jni_multiplexing)
class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class)
is_natives_class = not forwarding and (options.use_proxy_hash
or options.enable_jni_multiplexing)
class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class,
module_name)
package = jni_generator.ProxyHelpers.GetPackage(is_natives_class)
if forwarding or not (proxy_opts.use_hash
or proxy_opts.enable_jni_multiplexing):
if forwarding or not (options.use_proxy_hash
or options.enable_jni_multiplexing):
fields = string.Template("""\
public static final boolean TESTING_ENABLED = ${TESTING_ENABLED};
public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK};
""").substitute({
'TESTING_ENABLED': str(proxy_opts.enable_mocks).lower(),
'REQUIRE_MOCK': str(proxy_opts.require_mocks).lower(),
'TESTING_ENABLED': str(options.enable_proxy_mocks).lower(),
'REQUIRE_MOCK': str(options.require_mocks).lower(),
})
else:
fields = ''
@ -371,10 +364,7 @@ ${METHODS}
})
def CreateFromDict(registration_dict,
use_hash,
enable_jni_multiplexing=False,
manual_jni_registration=False):
def CreateFromDict(options, module_name, registration_dict):
"""Returns the content of the header file."""
template = string.Template("""\
@ -408,9 +398,8 @@ ${FORWARDING_CALLS}
${MANUAL_REGISTRATION}
#endif // ${HEADER_GUARD}
""")
_SetProxyRegistrationFields(registration_dict, use_hash,
enable_jni_multiplexing, manual_jni_registration)
if not enable_jni_multiplexing:
_SetProxyRegistrationFields(options, module_name, registration_dict)
if not options.enable_jni_multiplexing:
registration_dict['FORWARDING_CALLS'] = ''
if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
return ''
@ -436,19 +425,13 @@ def _GetJavaToNativeParamsList(params_list):
return 'jlong switch_num, ' + ', '.join(params_in_stub)
class HeaderGenerator(object):
class DictionaryGenerator(object):
"""Generates an inline header file for JNI registration."""
def __init__(self,
namespace,
content_namespace,
fully_qualified_class,
natives,
jni_params,
main_dex,
use_proxy_hash,
enable_jni_multiplexing=False):
self.namespace = namespace
def __init__(self, options, module_name, content_namespace,
fully_qualified_class, natives, jni_params, main_dex):
self.options = options
self.module_name = module_name
self.content_namespace = content_namespace
self.natives = natives
self.proxy_natives = [n for n in natives if n.is_proxy]
@ -459,15 +442,17 @@ class HeaderGenerator(object):
self.main_dex = main_dex
self.helper = jni_generator.HeaderFileGeneratorHelper(
self.class_name,
self.module_name,
fully_qualified_class,
use_proxy_hash,
enable_jni_multiplexing=enable_jni_multiplexing)
self.use_proxy_hash = use_proxy_hash
self.enable_jni_multiplexing = enable_jni_multiplexing
options.use_proxy_hash,
enable_jni_multiplexing=options.enable_jni_multiplexing)
self.registration_dict = None
def Generate(self):
self.registration_dict = {'FULL_CLASS_NAME': self.fully_qualified_class}
self.registration_dict = {
'FULL_CLASS_NAME': self.fully_qualified_class,
'MODULE_NAME': self.module_name
}
self._AddClassPathDeclarations()
self._AddForwardDeclaration()
self._AddJNINativeMethodsArrays()
@ -476,19 +461,16 @@ class HeaderGenerator(object):
self._AddRegisterNativesFunctions()
self.registration_dict['PROXY_NATIVE_SIGNATURES'] = (''.join(
_MakeProxySignature(
native,
self.use_proxy_hash,
enable_jni_multiplexing=self.enable_jni_multiplexing)
_MakeProxySignature(self.options, native)
for native in self.proxy_natives))
if self.enable_jni_multiplexing:
if self.options.enable_jni_multiplexing:
self._AssignSwitchNumberToNatives()
self._AddCases()
if self.use_proxy_hash or self.enable_jni_multiplexing:
if self.options.use_proxy_hash or self.options.enable_jni_multiplexing:
self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join(
_MakeForwardingProxy(
native, enable_jni_multiplexing=self.enable_jni_multiplexing)
_MakeForwardingProxy(self.options, self.module_name, native)
for native in self.proxy_natives))
return self.registration_dict
@ -582,10 +564,11 @@ ${KMETHODS}
if native.is_proxy:
# Literal name of the native method in the class that contains the actual
# native declaration.
if self.enable_jni_multiplexing:
if self.options.enable_jni_multiplexing:
return_type, params_list = native.return_and_signature
class_name = jni_generator.EscapeClassName(
jni_generator.ProxyHelpers.GetQualifiedClass(True) + self.namespace)
jni_generator.ProxyHelpers.GetQualifiedClass(
True, self.module_name))
proxy_signature = jni_generator.EscapeClassName(
_GetMultiplexProxyName(return_type, params_list))
@ -594,7 +577,7 @@ ${KMETHODS}
[jni_generator.Param(datatype='long', name='switch_num')] +
native.params, native.return_type)
stub_name = 'Java_' + class_name + '_' + proxy_signature
elif self.use_proxy_hash:
elif self.options.use_proxy_hash:
name = native.hashed_proxy_name
else:
name = native.proxy_name
@ -608,7 +591,7 @@ ${KMETHODS}
def _AddProxyNativeMethodKStrings(self):
"""Returns KMethodString for wrapped native methods in all_classes """
if self.main_dex or self.enable_jni_multiplexing:
if self.main_dex or self.options.enable_jni_multiplexing:
key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'
else:
key = 'PROXY_NATIVE_METHOD_ARRAY'
@ -618,7 +601,7 @@ ${KMETHODS}
self._SetDictValue(key, proxy_k_strings)
def _SubstituteNativeMethods(self, template, sub_proxy=False):
def _SubstituteNativeMethods(self, template):
"""Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided
template."""
ret = []
@ -626,9 +609,9 @@ ${KMETHODS}
all_classes[self.class_name] = self.fully_qualified_class
for clazz, full_clazz in all_classes.items():
if not sub_proxy:
if clazz == jni_generator.ProxyHelpers.GetClass(
self.use_proxy_hash or self.enable_jni_multiplexing):
self.options.use_proxy_hash or self.options.enable_jni_multiplexing,
self.module_name):
continue
kmethods = self._GetKMethodsString(clazz)
@ -723,6 +706,8 @@ ${NATIVES}\
params = _GetParamsListForMultiplex(signature[1], with_types=False)
values = {
'SWITCH_NUM': native.switch_num,
# We are forced to call the generated stub instead of the impl because
# the impl is not guaranteed to have a globally unique name.
'STUB_NAME': self.helper.GetStubName(native),
'PARAMS': params,
}
@ -778,7 +763,7 @@ def _GetMultiplexProxyName(return_type, params_list):
return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params
def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
def _MakeForwardingProxy(options, module_name, proxy_native):
template = string.Template("""
public static ${RETURN_TYPE} ${METHOD_NAME}(${PARAMS_WITH_TYPES}) {
${MAYBE_RETURN}${PROXY_CLASS}.${PROXY_METHOD_NAME}(${PARAM_NAMES});
@ -787,9 +772,9 @@ def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
params_with_types = ', '.join(
'%s %s' % (p.datatype, p.name) for p in proxy_native.params)
param_names = ', '.join(p.name for p in proxy_native.params)
proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True)
proxy_class = jni_generator.ProxyHelpers.GetQualifiedClass(True, module_name)
if enable_jni_multiplexing:
if options.enable_jni_multiplexing:
if not param_names:
param_names = proxy_native.switch_num + 'L'
else:
@ -817,15 +802,13 @@ def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
})
def _MakeProxySignature(proxy_native,
use_proxy_hash,
enable_jni_multiplexing=False):
def _MakeProxySignature(options, proxy_native):
params_with_types = ', '.join('%s %s' % (p.datatype, p.name)
for p in proxy_native.params)
native_method_line = """
public static native ${RETURN} ${PROXY_NAME}(${PARAMS_WITH_TYPES});"""
if enable_jni_multiplexing:
if options.enable_jni_multiplexing:
# This has to be only one line and without comments because all the proxy
# signatures will be joined, then split on new lines with duplicates removed
# since multiple |proxy_native|s map to the same multiplexed signature.
@ -836,7 +819,7 @@ def _MakeProxySignature(proxy_native,
proxy_name = _GetMultiplexProxyName(return_type, params_list)
params_with_types = 'long switch_num' + _GetParamsListForMultiplex(
params_list, with_types=True)
elif use_proxy_hash:
elif options.use_proxy_hash:
signature_template = string.Template("""
// Original name: ${ALT_NAME}""" + native_method_line)
@ -859,18 +842,6 @@ def _MakeProxySignature(proxy_native,
})
class ProxyOptions:
def __init__(self, **kwargs):
self.use_hash = kwargs.get('use_hash', False)
self.enable_jni_multiplexing = kwargs.get('enable_jni_multiplexing', False)
self.manual_jni_registration = kwargs.get('manual_jni_registration', False)
self.enable_mocks = kwargs.get('enable_mocks', False)
self.require_mocks = kwargs.get('require_mocks', False)
# Can never require and disable.
assert self.enable_mocks or not self.require_mocks
def main(argv):
arg_parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(arg_parser)
@ -888,64 +859,56 @@ def main(argv):
required=True,
help='Path to output srcjar for GEN_JNI.java (and J/N.java if proxy'
' hash is enabled).')
arg_parser.add_argument(
'--sources-exclusions',
arg_parser.add_argument('--file-exclusions',
default=[],
help='A list of Java files which should be ignored '
'by the parser.')
arg_parser.add_argument(
'--namespace',
default='',
help='Namespace to wrap the registration functions '
help='Native namespace to wrap the registration functions '
'into.')
# TODO(crbug.com/898261) hook these flags up to the build config to enable
# mocking in instrumentation tests
arg_parser.add_argument(
'--enable_proxy_mocks',
'--enable-proxy-mocks',
default=False,
action='store_true',
help='Allows proxy native impls to be mocked through Java.')
arg_parser.add_argument(
'--require_mocks',
'--require-mocks',
default=False,
action='store_true',
help='Requires all used native implementations to have a mock set when '
'called. Otherwise an exception will be thrown.')
arg_parser.add_argument(
'--use_proxy_hash',
'--use-proxy-hash',
action='store_true',
help='Enables hashing of the native declaration for methods in '
'an @JniNatives interface')
arg_parser.add_argument(
'--enable_jni_multiplexing',
'--enable-jni-multiplexing',
action='store_true',
help='Enables JNI multiplexing for Java native methods')
arg_parser.add_argument(
'--manual_jni_registration',
'--manual-jni-registration',
action='store_true',
help='Manually do JNI registration - required for crazy linker')
arg_parser.add_argument('--include_test_only',
arg_parser.add_argument('--include-test-only',
action='store_true',
help='Whether to maintain ForTesting JNI methods.')
args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))
if not args.enable_proxy_mocks and args.require_mocks:
arg_parser.error(
'Invalid arguments: --require_mocks without --enable_proxy_mocks. '
'Invalid arguments: --require-mocks without --enable-proxy-mocks. '
'Cannot require mocks if they are not enabled.')
if not args.header_path and args.manual_jni_registration:
arg_parser.error(
'Invalid arguments: --manual_jni_registration without --header-path. '
'Invalid arguments: --manual-jni-registration without --header-path. '
'Cannot manually register JNI if there is no output header file.')
sources_files = sorted(set(build_utils.ParseGnList(args.sources_files)))
proxy_opts = ProxyOptions(
use_hash=args.use_proxy_hash,
enable_jni_multiplexing=args.enable_jni_multiplexing,
manual_jni_registration=args.manual_jni_registration,
require_mocks=args.require_mocks,
enable_mocks=args.enable_proxy_mocks)
java_file_paths = []
for f in sources_files:
@ -953,13 +916,8 @@ def main(argv):
# skip Kotlin files as they are not supported by JNI generation.
java_file_paths.extend(
p for p in build_utils.ReadSourcesList(f) if p.startswith('..')
and p not in args.sources_exclusions and not p.endswith('.kt'))
_Generate(java_file_paths,
args.srcjar_path,
proxy_opts=proxy_opts,
header_path=args.header_path,
namespace=args.namespace,
include_test_only=args.include_test_only)
and p not in args.file_exclusions and not p.endswith('.kt'))
_Generate(args, java_file_paths)
if args.depfile:
build_utils.WriteDepfile(args.depfile, args.srcjar_path,

View File

@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "base/android/jni_android.h"
#include "base/android/jni_generator/sample_jni_apk__final_jni_generated.h"
#include "base/android/jni_generator/jni_registration_generated.h"
#include "base/android/jni_utils.h"
// This is called by the VM when the shared library is first loaded.

View File

@ -0,0 +1,105 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/meminfo_dump_provider.h"
#include <jni.h>
#include "base/android/jni_android.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/base_jni_headers/MemoryInfoBridge_jni.h"
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base::android {
MeminfoDumpProvider::MeminfoDumpProvider() {
#if BUILDFLAG(ENABLE_BASE_TRACING)
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, kDumpProviderName, nullptr);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
MeminfoDumpProvider& MeminfoDumpProvider::Initialize() {
static base::NoDestructor<MeminfoDumpProvider> instance;
return *instance.get();
}
bool MeminfoDumpProvider::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// This is best-effort, and will be wrong if there are other callers of
// ActivityManager#getProcessMemoryInfo(), either in this process or from
// another process which is allowed to do so (typically, adb).
//
// However, since the framework doesn't document throttling in any non-vague
// terms and the results are not timestamped, this is the best we can do. The
// delay and the rest of the assumptions here come from
// https://android.googlesource.com/platform/frameworks/base/+/refs/heads/android13-dev/services/core/java/com/android/server/am/ActivityManagerService.java#4093.
//
// We could always report the value on pre-Q devices, but that would skew
// reported data. Also, some OEMs may have cherry-picked the Q change, meaning
// that it's safer and more accurate to not report likely-stale data on all
// Android releases.
base::TimeTicks now = base::TimeTicks::Now();
bool stale_data = (now - last_collection_time_) < base::Minutes(5);
// Background data dumps (as in the BACKGROUND level of detail, not the
// application being in background) should not include stale data, since it
// would confuse data in UMA. In particular, the background/foreground session
// filter would no longer be accurate.
if (stale_data && args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
return true;
}
base::trace_event::MemoryAllocatorDump* dump =
pmd->CreateAllocatorDump(kDumpName);
// Data is either expected to be fresh, or this is a manually requested dump,
// and we should still report data, but note that it is stale.
dump->AddScalar(kIsStaleName, "bool", stale_data);
last_collection_time_ = now;
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobject> memory_info =
Java_MemoryInfoBridge_getActivityManagerMemoryInfoForSelf(env);
// Tell the manager that collection failed. Since this is likely not a
// transient failure, don't return an empty dump, and let the manager exclude
// this provider from the next dump.
if (memory_info.is_null()) {
LOG(WARNING) << "Got a null value";
return false;
}
ScopedJavaLocalRef<jclass> clazz{env, env->GetObjectClass(memory_info.obj())};
jfieldID other_private_dirty_id =
env->GetFieldID(clazz.obj(), "otherPrivateDirty", "I");
jfieldID other_pss_id = env->GetFieldID(clazz.obj(), "otherPss", "I");
int other_private_dirty_kb =
env->GetIntField(memory_info.obj(), other_private_dirty_id);
int other_pss_kb = env->GetIntField(memory_info.obj(), other_pss_id);
// What "other" covers is not documented in Debug#MemoryInfo, nor in
// ActivityManager#getProcessMemoryInfo. However, it calls
// Debug#getMemoryInfo(), which ends up summing all the heaps in the range
// [HEAP_DALVIK_OTHER, HEAP_OTHER_MEMTRACK]. See the definitions in
// https://android.googlesource.com/platform/frameworks/base/+/0b7c1774ba42daef7c80bf2f00fe1c0327e756ae/core/jni/android_os_Debug.cpp#60,
// and the code in android_os_Debug_getDirtyPagesPid() in the same file.
dump->AddScalar(kPrivateDirtyMetricName, "bytes",
static_cast<uint64_t>(other_private_dirty_kb) * 1024);
dump->AddScalar(kPssMetricName, "bytes",
static_cast<uint64_t>(other_pss_kb) * 1024);
return true;
#else // BUILDFLAG(ENABLE_BASE_TRACING)
return false;
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace base::android

View File

@ -0,0 +1,38 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#define BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_
#include "base/base_export.h"
#include "base/no_destructor.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
namespace base::android {
class BASE_EXPORT MeminfoDumpProvider
: public base::trace_event::MemoryDumpProvider {
public:
// Returns the instance for testing.
static MeminfoDumpProvider& Initialize();
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
static constexpr char kDumpProviderName[] = "android_meminfo";
static constexpr char kDumpName[] = "meminfo";
static constexpr char kIsStaleName[] = "is_stale";
static constexpr char kPssMetricName[] = "other_pss";
static constexpr char kPrivateDirtyMetricName[] = "other_private_dirty";
private:
friend class base::NoDestructor<MeminfoDumpProvider>;
MeminfoDumpProvider();
base::TimeTicks last_collection_time_;
};
} // namespace base::android
#endif // BASE_ANDROID_MEMINFO_DUMP_PROVIDER_H_

View File

@ -4,6 +4,7 @@
#include "base/android/callback_android.h"
#include "base/android/jni_android.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
#include "base/base_jni_headers/NativeUmaRecorder_jni.h"
#include "base/format_macros.h"
@ -264,6 +265,36 @@ jint JNI_NativeUmaRecorder_GetHistogramTotalCountForTesting(
return actual_count;
}
// Returns an array with 3 entries for each bucket, representing (min, max,
// count).
ScopedJavaLocalRef<jlongArray>
JNI_NativeUmaRecorder_GetHistogramSamplesForTesting(
JNIEnv* env,
const JavaParamRef<jstring>& histogram_name) {
std::string name = android::ConvertJavaStringToUTF8(env, histogram_name);
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
std::vector<int64_t> buckets;
if (histogram == nullptr) {
// No samples have been recorded for this histogram.
return base::android::ToJavaLongArray(env, buckets);
}
std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
for (auto sampleCountIterator = samples->Iterator();
!sampleCountIterator->Done(); sampleCountIterator->Next()) {
HistogramBase::Sample min;
int64_t max;
HistogramBase::Count count;
sampleCountIterator->Get(&min, &max, &count);
buckets.push_back(min);
buckets.push_back(max);
buckets.push_back(count);
}
return base::android::ToJavaLongArray(env, buckets);
}
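Because the layout is flat, a consumer walks the returned array in strides of three. A minimal sketch of decoding the triplets, assuming `buckets` is the std::vector<int64_t> built above:
// Decode the flattened (min, max, count) triplets.
for (size_t i = 0; i + 2 < buckets.size(); i += 3) {
  int64_t min = buckets[i];
  int64_t max = buckets[i + 1];
  int64_t count = buckets[i + 2];
  VLOG(1) << "bucket [" << min << ", " << max << "): " << count << " samples";
}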
jlong JNI_NativeUmaRecorder_CreateHistogramSnapshotForTesting(JNIEnv* env) {
HistogramsSnapshot* snapshot = new HistogramsSnapshot();
for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {

View File

@ -240,7 +240,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This can get very large as it constructs the whole data structure in
// memory before dumping it to the file.
Value root(Value::Type::DICTIONARY);
Value root(Value::Type::DICT);
uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed);
root.SetStringKey("total_calls_count",
base::StringPrintf("%" PRIu32, total_calls_count));
@ -252,7 +252,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// This callee was never called.
continue;
Value callee_element(Value::Type::DICTIONARY);
Value callee_element(Value::Type::DICT);
uint32_t callee_offset = i * 4;
callee_element.SetStringKey("index",
base::StringPrintf("%" PRIuS, caller_index));
@ -278,7 +278,7 @@ NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
// No misses.
continue;
Value caller_count(Value::Type::DICTIONARY);
Value caller_count(Value::Type::DICT);
caller_count.SetStringKey("caller_offset",
base::StringPrintf("%" PRIu32, caller_offset));
caller_count.SetStringKey("count", base::StringPrintf("%" PRIu32, count));

View File

@ -2,6 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// IMPORTANT NOTE: deprecated. Use std::atomic instead.
//
// Rationale:
// - Uniformity: most of the code uses std::atomic, and the underlying
// implementation is the same. Use the STL one.
// - Clearer code: return values from some operations (e.g. CompareAndSwap)
// differ from the equivalent ones in std::atomic, leading to confusion.
// - Richer semantics: can use actual types, rather than e.g. Atomic32 for a
// boolean flag, or AtomicWord for T*. Bitwise operations (e.g. fetch_or())
// are only in std::atomic.
// - Harder to misuse: base::subtle::Atomic32 is just an int, making it possible
// to accidentally manipulate, not realizing that there are no atomic
// semantics attached to it. For instance, "Atomic32 a; a++;" is almost
// certainly incorrect.
// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
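A minimal before/after sketch of the misuse point above; the base::subtle lines are the discouraged pattern, kept as comments:
#include <atomic>

// Discouraged: Atomic32 is just an int, so nothing stops a plain,
// non-atomic increment.
//   base::subtle::Atomic32 flag = 0;
//   flag++;  // Compiles, but carries no atomic semantics.

// Preferred: a real type with atomic semantics attached.
std::atomic<bool> ready{false};

void Publish() { ready.store(true, std::memory_order_release); }
bool Consume() { return ready.load(std::memory_order_acquire); }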

View File

@ -137,9 +137,8 @@ class BASE_EXPORT BigEndianWriter {
template<typename T>
bool Write(T v);
// TODO(crbug.com/1298696): Breaks net_unittests.
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> ptr_;
raw_ptr<char, DanglingUntriagedDegradeToNoOpWhenMTE> end_;
raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> ptr_;
raw_ptr<char, DanglingUntriaged | AllowPtrArithmetic> end_;
};
} // namespace base

View File

@ -180,10 +180,24 @@ std::ostream& CheckError::stream() {
}
CheckError::~CheckError() {
// TODO(crbug.com/1409729): Consider splitting out CHECK from DCHECK so that
// the destructor can be marked [[noreturn]] and we don't need to check
// severity in the destructor.
const bool is_fatal = log_message_->severity() == LOGGING_FATAL;
// Note: This function ends up in crash stack traces. If its full name
// changes, the crash server's magic signature logic needs to be updated.
// See cl/306632920.
delete log_message_;
// Make sure we crash even if LOG(FATAL) has been overridden.
// TODO(crbug.com/1409729): Include Windows here too. This is done in steps to
// prevent backsliding on platforms where this goes through CQ.
// Windows is blocked by:
// * All/RenderProcessHostWriteableFileDeathTest.
// PassUnsafeWriteableExecutableFile/2
if (is_fatal && !BUILDFLAG(IS_WIN)) {
base::ImmediateCrash();
}
}
NotReachedError NotReachedError::NotReached(const char* file, int line) {
@ -198,13 +212,47 @@ NotReachedError NotReachedError::NotReached(const char* file, int line) {
}
void NotReachedError::TriggerNotReached() {
// TODO(pbos): Add back NotReachedError("", -1) here asap. This was removed to
// disable NOTREACHED() reports temporarily for M111 and should be added
// back once this change has merged to M111.
// This triggers a NOTREACHED() error as the returned NotReachedError goes out
// of scope.
NotReached("", -1);
}
NotReachedError::~NotReachedError() = default;
NotReachedNoreturnError::NotReachedNoreturnError(const char* file, int line)
: CheckError([file, line]() {
auto* const log_message = new LogMessage(file, line, LOGGING_FATAL);
log_message->stream() << "NOTREACHED hit. ";
return log_message;
}()) {}
// Note: This function ends up in crash stack traces. If its full name changes,
// the crash server's magic signature logic needs to be updated. See
// cl/306632920.
NotReachedNoreturnError::~NotReachedNoreturnError() {
delete log_message_;
// Make sure we die if we haven't. LOG(FATAL) is not yet [[noreturn]] as of
// writing this.
base::ImmediateCrash();
}
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
const char* file,
int line,
const char* expr_str,
char* v1_str,
char* v2_str) {
LogMessage* const log_message =
is_dcheck ? new DCheckLogMessage(file, line, LOGGING_DCHECK)
: new LogMessage(file, line, LOGGING_FATAL);
log_message->stream() << "Check failed: " << expr_str << " (" << v1_str
<< " vs. " << v2_str << ")";
free(v1_str);
free(v2_str);
return log_message;
}
void RawCheck(const char* message) {
RawLog(LOGGING_FATAL, message);
}

View File

@ -93,7 +93,7 @@ class BASE_EXPORT CheckError {
return stream() << streamed_type;
}
private:
protected:
LogMessage* const log_message_;
};
@ -113,6 +113,15 @@ class BASE_EXPORT NotReachedError : public CheckError {
using CheckError::CheckError;
};
// TODO(crbug.com/851128): This should take the name of the above class once all
// callers of NOTREACHED() have migrated to the CHECK-fatal version.
class BASE_EXPORT NotReachedNoreturnError : public CheckError {
public:
NotReachedNoreturnError(const char* file, int line);
[[noreturn]] NOMERGE NOINLINE NOT_TAIL_CALLED ~NotReachedNoreturnError();
};
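A sketch of what the [[noreturn]] destructor buys callers, assuming a NOTREACHED_NORETURN() macro wired to this class (enum and function invented for illustration):
enum class Color { kRed, kBlue };

int PickIndex(Color c) {
  switch (c) {
    case Color::kRed:
      return 0;
    case Color::kBlue:
      return 1;
  }
  // Because ~NotReachedNoreturnError() is [[noreturn]], the compiler knows
  // control cannot continue past this line; no dummy return is needed.
  NOTREACHED_NORETURN();
}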
// The 'switch' is used to prevent the 'else' from being ambiguous when the
// macro is used in an 'if' clause such as:
// if (a == 1)

View File

@ -76,19 +76,4 @@ char* StreamValToStr(const void* v,
return strdup(ss.str().c_str());
}
LogMessage* CheckOpResult::CreateLogMessage(bool is_dcheck,
const char* file,
int line,
const char* expr_str,
char* v1_str,
char* v2_str) {
LogMessage* const log_message =
new LogMessage(file, line, is_dcheck ? LOGGING_DCHECK : LOGGING_FATAL);
log_message->stream() << "Check failed: " << expr_str << " (" << v1_str
<< " vs. " << v2_str << ")";
free(v1_str);
free(v2_str);
return log_message;
}
} // namespace logging

View File

@ -64,7 +64,7 @@
// folding of multiple identical caller functions into a single signature. To
// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
// Use like:
// void NOT_TAIL_CALLED FooBar();
// NOT_TAIL_CALLED void FooBar();
#if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)
#define NOT_TAIL_CALLED __attribute__((not_tail_called))
#else

View File

@ -253,6 +253,15 @@ class EnumSet {
// Removes all values from our set.
void Clear() { enums_.reset(); }
// Conditionally puts or removes `value`, based on `should_be_present`.
void PutOrRemove(E value, bool should_be_present) {
if (should_be_present) {
Put(value);
} else {
Remove(value);
}
}
// Returns true iff the given value is in range and a member of our set.
constexpr bool Has(E value) const {
return InRange(value) && enums_[ToIndex(value)];
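A hedged usage sketch for the new PutOrRemove() helper (enum, set alias, and flag invented for illustration):
enum class Permission { kCamera, kMic, kLocation, kMaxValue = kLocation };
using PermissionSet =
    base::EnumSet<Permission, Permission::kCamera, Permission::kMaxValue>;

void UpdateMic(PermissionSet& granted, bool user_allows_mic) {
  // Replaces the usual if/else around Put()/Remove().
  granted.PutOrRemove(Permission::kMic, user_allows_mic);
}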

View File

@ -250,7 +250,7 @@ class small_map {
inline explicit iterator(const typename NormalMap::iterator& init)
: array_iter_(nullptr), map_iter_(init) {}
raw_ptr<value_type> array_iter_;
raw_ptr<value_type, AllowPtrArithmetic> array_iter_;
typename NormalMap::iterator map_iter_;
};
@ -327,7 +327,7 @@ class small_map {
const typename NormalMap::const_iterator& init)
: array_iter_(nullptr), map_iter_(init) {}
raw_ptr<const value_type> array_iter_;
raw_ptr<const value_type, AllowPtrArithmetic> array_iter_;
typename NormalMap::const_iterator map_iter_;
};

View File

@ -18,6 +18,7 @@
#include "base/containers/checked_iterators.h"
#include "base/containers/contiguous_iterator.h"
#include "base/cxx20_to_address.h"
#include "base/numerics/safe_math.h"
namespace base {
@ -256,16 +257,16 @@ class GSL_POINTER span : public internal::ExtentStorage<Extent> {
template <typename It,
typename = internal::EnableIfCompatibleContiguousIterator<It, T>>
constexpr span(It first, size_t count) noexcept
constexpr span(It first, StrictNumeric<size_t> count) noexcept
: ExtentStorage(count),
// The use of to_address() here is to handle the case where the iterator
// `first` is pointing to the container's `end()`. In that case we can
// not use the address returned from the iterator, or dereference it
// through the iterator's `operator*`, but we can store it. We must assume
// in this case that `count` is 0, since the iterator does not point to
// valid data. Future hardening of iterators may disallow pulling the
// address from `end()`, as demonstrated by asserts() in libstdc++:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960.
// through the iterator's `operator*`, but we can store it. We must
// assume in this case that `count` is 0, since the iterator does not
// point to valid data. Future hardening of iterators may disallow
// pulling the address from `end()`, as demonstrated by asserts() in
// libstdc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960.
//
// The span API dictates that the `data()` is accessible when size is 0,
// since the pointer may be valid, so we cannot prevent storing and
@ -473,7 +474,7 @@ as_writable_bytes(span<T, X> s) noexcept {
// Type-deducing helpers for constructing a span.
template <int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, size_t size) noexcept {
constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T>(it, size);
}
@ -508,7 +509,7 @@ constexpr auto make_span(Container&& container) noexcept {
//
// Usage: auto static_span = base::make_span<N>(...);
template <size_t N, int&... ExplicitArgumentBarrier, typename It>
constexpr auto make_span(It it, size_t size) noexcept {
constexpr auto make_span(It it, StrictNumeric<size_t> size) noexcept {
using T = std::remove_reference_t<iter_reference_t<It>>;
return span<T, N>(it, size);
}
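The practical effect of switching to StrictNumeric<size_t>: a signed count no longer converts silently at the call site. A sketch, assuming base::checked_cast from base/numerics/safe_conversions.h:
#include <vector>

#include "base/containers/span.h"
#include "base/numerics/safe_conversions.h"

void Example(std::vector<int>& v, int n) {
  // Fine: v.size() is already a size_t.
  auto all = base::make_span(v.data(), v.size());

  // base::make_span(v.data(), n) now fails to compile: StrictNumeric
  // rejects the implicit int -> size_t sign conversion, so the caller must
  // spell it out.
  auto prefix = base::make_span(v.data(), base::checked_cast<size_t>(n));
}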

View File

@ -5,27 +5,13 @@
#ifndef BASE_CXX17_BACKPORTS_H_
#define BASE_CXX17_BACKPORTS_H_
#include <functional>
#include "base/check.h"
#include <algorithm>
namespace base {
// C++14 implementation of C++17's std::clamp():
// https://en.cppreference.com/w/cpp/algorithm/clamp
// Please note that the C++ spec makes it undefined behavior to call std::clamp
// with a value of `lo` that compares greater than the value of `hi`. This
// implementation uses a CHECK to enforce this as a hard restriction.
template <typename T, typename Compare>
constexpr const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) {
CHECK(!comp(hi, lo));
return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
}
template <typename T>
constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
return base::clamp(v, lo, hi, std::less<T>{});
}
// TODO(crbug.com/1373621): Rewrite all uses of base::clamp as std::clamp and
// remove this file.
using std::clamp;
} // namespace base
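The deleted comment's caveat carries over: std::clamp is likewise undefined behavior when `lo` compares greater than `hi`, and the CHECK that base::clamp enforced is gone. A minimal sketch:
#include <algorithm>

int ClampToByte(int v) {
  return std::clamp(v, 0, 255);  // OK: lo <= hi.
  // std::clamp(v, 255, 0);      // UB: lo > hi. base::clamp used to CHECK this.
}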

View File

@ -1,5 +1,2 @@
# For activity tracking:
per-file activity_*=bcwhite@chromium.org
# For ASan integration:
per-file asan_service*=file://base/memory/MIRACLE_PTR_OWNERS

View File

@ -1,407 +0,0 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/debug/activity_analyzer.h"
#include <utility>
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
namespace base {
namespace debug {
namespace {
const ActivityUserData::Snapshot& GetEmptyUserDataSnapshot() {
// An empty snapshot that can be returned when there otherwise is none.
static const NoDestructor<ActivityUserData::Snapshot> empty_snapshot;
return *empty_snapshot;
}
// DO NOT CHANGE VALUES. This is logged persistently in a histogram.
enum AnalyzerCreationError {
kInvalidMemoryMappedFile,
kPmaBadFile,
kPmaUninitialized,
kPmaDeleted,
kPmaCorrupt,
kAnalyzerCreationErrorMax // Keep this last.
};
void LogAnalyzerCreationError(AnalyzerCreationError error) {
UmaHistogramEnumeration("ActivityTracker.Collect.AnalyzerCreationError",
error, kAnalyzerCreationErrorMax);
}
} // namespace
ThreadActivityAnalyzer::Snapshot::Snapshot() = default;
ThreadActivityAnalyzer::Snapshot::~Snapshot() = default;
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
const ThreadActivityTracker& tracker)
: activity_snapshot_valid_(tracker.CreateSnapshot(&activity_snapshot_)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
: ThreadActivityAnalyzer(ThreadActivityTracker(base, size)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference)
: ThreadActivityAnalyzer(allocator->GetAsArray<char>(
reference,
GlobalActivityTracker::kTypeIdActivityTracker,
PersistentMemoryAllocator::kSizeAny),
allocator->GetAllocSize(reference)) {}
ThreadActivityAnalyzer::~ThreadActivityAnalyzer() = default;
void ThreadActivityAnalyzer::AddGlobalInformation(
GlobalActivityAnalyzer* global) {
if (!IsValid())
return;
// User-data is held at the global scope even though it's referenced at the
// thread scope.
activity_snapshot_.user_data_stack.clear();
for (auto& activity : activity_snapshot_.activity_stack) {
// The global GetUserDataSnapshot will return an empty snapshot if the ref
// or id is not valid.
activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
activity_snapshot_.process_id, activity.user_data_ref,
activity.user_data_id));
}
}
GlobalActivityAnalyzer::GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator)
: allocator_(std::move(allocator)),
analysis_stamp_(0LL),
allocator_iterator_(allocator_.get()) {
DCHECK(allocator_);
}
GlobalActivityAnalyzer::~GlobalActivityAnalyzer() = default;
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator) {
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
LogAnalyzerCreationError(kPmaUninitialized);
return nullptr;
}
if (allocator->GetMemoryState() ==
PersistentMemoryAllocator::MEMORY_DELETED) {
LogAnalyzerCreationError(kPmaDeleted);
return nullptr;
}
if (allocator->IsCorrupt()) {
LogAnalyzerCreationError(kPmaCorrupt);
return nullptr;
}
return std::make_unique<GlobalActivityAnalyzer>(std::move(allocator));
}
#if !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
const FilePath& file_path) {
// Map the file read-write so it can guarantee consistency between
// the analyzer and any trackers that my still be active.
std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
if (!mmfile->Initialize(file_path, MemoryMappedFile::READ_WRITE)) {
LogAnalyzerCreationError(kInvalidMemoryMappedFile);
return nullptr;
}
if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
LogAnalyzerCreationError(kPmaBadFile);
return nullptr;
}
return CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
std::move(mmfile), 0, 0, StringPiece(), /*readonly=*/true));
}
#endif // !BUILDFLAG(IS_NACL)
// static
std::unique_ptr<GlobalActivityAnalyzer>
GlobalActivityAnalyzer::CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping) {
if (!mapping.IsValid() ||
!ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
mapping)) {
return nullptr;
}
return CreateWithAllocator(
std::make_unique<ReadOnlySharedPersistentMemoryAllocator>(
std::move(mapping), 0, StringPiece()));
}
ProcessId GlobalActivityAnalyzer::GetFirstProcess() {
PrepareAllAnalyzers();
return GetNextProcess();
}
ProcessId GlobalActivityAnalyzer::GetNextProcess() {
if (process_ids_.empty())
return 0;
ProcessId pid = process_ids_.back();
process_ids_.pop_back();
return pid;
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer(
ProcessId pid) {
analyzers_iterator_ = analyzers_.begin();
analyzers_iterator_pid_ = pid;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
int64_t create_stamp;
if (analyzers_iterator_->second->GetProcessId(&create_stamp) == pid &&
create_stamp <= analysis_stamp_) {
return analyzers_iterator_->second.get();
}
return GetNextAnalyzer();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetNextAnalyzer() {
DCHECK(analyzers_iterator_ != analyzers_.end());
int64_t create_stamp;
do {
++analyzers_iterator_;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
} while (analyzers_iterator_->second->GetProcessId(&create_stamp) !=
analyzers_iterator_pid_ ||
create_stamp > analysis_stamp_);
return analyzers_iterator_->second.get();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
const ThreadKey& key) {
auto found = analyzers_.find(key);
if (found == analyzers_.end())
return nullptr;
return found->second.get();
}
ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
ProcessId pid,
uint32_t ref,
uint32_t id) {
ActivityUserData::Snapshot snapshot;
void* memory = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdUserDataRecord,
PersistentMemoryAllocator::kSizeAny);
if (memory) {
size_t size = allocator_->GetAllocSize(ref);
const ActivityUserData user_data(memory, size);
user_data.CreateSnapshot(&snapshot);
ProcessId process_id;
int64_t create_stamp;
if (!ActivityUserData::GetOwningProcessId(memory, &process_id,
&create_stamp) ||
process_id != pid || user_data.id() != id) {
// This allocation has been overwritten since it was created. Return an
// empty snapshot because whatever was captured is incorrect.
snapshot.clear();
}
}
return snapshot;
}
const ActivityUserData::Snapshot&
GlobalActivityAnalyzer::GetProcessDataSnapshot(ProcessId pid) {
auto iter = process_data_.find(pid);
if (iter == process_data_.end())
return GetEmptyUserDataSnapshot();
if (iter->second.create_stamp > analysis_stamp_)
return GetEmptyUserDataSnapshot();
DCHECK_EQ(pid, iter->second.process_id);
return iter->second.data;
}
std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
std::vector<std::string> messages;
PersistentMemoryAllocator::Reference ref;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
while ((ref = iter.GetNextOfType(
GlobalActivityTracker::kTypeIdGlobalLogMessage)) != 0) {
const char* message = allocator_->GetAsArray<char>(
ref, GlobalActivityTracker::kTypeIdGlobalLogMessage,
PersistentMemoryAllocator::kSizeAny);
if (message)
messages.push_back(message);
}
return messages;
}
std::vector<GlobalActivityTracker::ModuleInfo>
GlobalActivityAnalyzer::GetModules(ProcessId pid) {
std::vector<GlobalActivityTracker::ModuleInfo> modules;
PersistentMemoryAllocator::Iterator iter(allocator_.get());
const GlobalActivityTracker::ModuleInfoRecord* record;
while (
(record =
iter.GetNextOfObject<GlobalActivityTracker::ModuleInfoRecord>()) !=
nullptr) {
ProcessId process_id;
int64_t create_stamp;
if (!OwningProcess::GetOwningProcessId(&record->owner, &process_id,
&create_stamp) ||
pid != process_id || create_stamp > analysis_stamp_) {
continue;
}
GlobalActivityTracker::ModuleInfo info;
if (record->DecodeTo(&info, allocator_->GetAllocSize(
allocator_->GetAsReference(record)))) {
modules.push_back(std::move(info));
}
}
return modules;
}
GlobalActivityAnalyzer::ProgramLocation
GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
// This should be implemented but it's never been a priority.
return { 0, 0 };
}
bool GlobalActivityAnalyzer::IsDataComplete() const {
DCHECK(allocator_);
return !allocator_->IsFull();
}
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
const UserDataSnapshot& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
UserDataSnapshot&& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() = default;
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Record the time when analysis started.
analysis_stamp_ = base::Time::Now().ToInternalValue();
// Fetch all the records. This will retrieve only ones created since the
// last run since the PMA iterator will continue from where it left off.
uint32_t type;
PersistentMemoryAllocator::Reference ref;
while ((ref = allocator_iterator_.GetNext(&type)) != 0) {
switch (type) {
case GlobalActivityTracker::kTypeIdActivityTracker:
case GlobalActivityTracker::kTypeIdActivityTrackerFree:
case GlobalActivityTracker::kTypeIdProcessDataRecord:
case GlobalActivityTracker::kTypeIdProcessDataRecordFree:
case PersistentMemoryAllocator::kTypeIdTransitioning:
// Active, free, or transitioning: add it to the list of references
// for later analysis.
memory_references_.insert(ref);
break;
}
}
// Clear out any old information.
analyzers_.clear();
process_data_.clear();
process_ids_.clear();
std::set<ProcessId> seen_pids;
// Go through all the known references and create objects for them with
// snapshots of the current state.
for (PersistentMemoryAllocator::Reference memory_ref : memory_references_) {
// Get the actual data segment for the tracker. Any type will do since it
// is checked below.
void* const base = allocator_->GetAsArray<char>(
memory_ref, PersistentMemoryAllocator::kTypeIdAny,
PersistentMemoryAllocator::kSizeAny);
const size_t size = allocator_->GetAllocSize(memory_ref);
if (!base)
continue;
switch (allocator_->GetType(memory_ref)) {
case GlobalActivityTracker::kTypeIdActivityTracker: {
// Create the analyzer on the data. This will capture a snapshot of the
// tracker state. This can fail if the tracker is somehow corrupted or
// is in the process of shutting down.
std::unique_ptr<ThreadActivityAnalyzer> analyzer(
new ThreadActivityAnalyzer(base, size));
if (!analyzer->IsValid())
continue;
analyzer->AddGlobalInformation(this);
// Track PIDs.
ProcessId pid = analyzer->GetProcessId();
if (seen_pids.find(pid) == seen_pids.end()) {
process_ids_.push_back(pid);
seen_pids.insert(pid);
}
// Add this analyzer to the map of known ones, indexed by a unique
// thread identifier.
DCHECK(!base::Contains(analyzers_, analyzer->GetThreadKey()));
analyzer->allocator_reference_ = ref;
analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
} break;
case GlobalActivityTracker::kTypeIdProcessDataRecord: {
// Get the PID associated with this data record.
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
DCHECK(!base::Contains(process_data_, process_id));
// Create a snapshot of the data. This can fail if the data is somehow
// corrupted or the process shut down and the memory was released.
UserDataSnapshot& snapshot = process_data_[process_id];
snapshot.process_id = process_id;
snapshot.create_stamp = create_stamp;
const ActivityUserData process_data(base, size);
if (!process_data.CreateSnapshot(&snapshot.data))
break;
// Check that nothing changed. If it did, forget what was recorded.
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
if (process_id != snapshot.process_id ||
create_stamp != snapshot.create_stamp) {
process_data_.erase(process_id);
break;
}
// Track PIDs.
if (seen_pids.find(process_id) == seen_pids.end()) {
process_ids_.push_back(process_id);
seen_pids.insert(process_id);
}
} break;
}
}
// Reverse the list of PIDs so that they get popped in the order found.
ranges::reverse(process_ids_);
}
} // namespace debug
} // namespace base
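The GetOwningProcessId() calls before and after CreateSnapshot() above form an optimistic-read check: copy first, then verify the owner identity is unchanged, and discard the copy on a mismatch. A minimal sketch of the same pattern, detached from the tracker types (OwnerHeader and the layout are illustrative, not the real record format):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical owner stamp at a fixed location in shared memory.
struct OwnerHeader {
  int64_t process_id;
  int64_t create_stamp;
};

// Copy the payload, then verify the memory was not recycled mid-copy.
bool SnapshotWithRecheck(const OwnerHeader* header,
                         const void* payload,
                         std::size_t size,
                         void* out) {
  const OwnerHeader before = *header;  // 1. Record the owner identity.
  std::memcpy(out, payload, size);     // 2. Copy; may race with reuse.
  const OwnerHeader after = *header;   // 3. Re-read the owner identity.
  // A mismatch means another process re-used the memory during the copy,
  // so the payload may mix two owners; the caller must discard it.
  return before.process_id == after.process_id &&
         before.create_stamp == after.create_stamp;
}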

View File

@ -1,260 +0,0 @@
// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_DEBUG_ACTIVITY_ANALYZER_H_
#define BASE_DEBUG_ACTIVITY_ANALYZER_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/debug/activity_tracker.h"
#include "base/memory/shared_memory_mapping.h"
#include "build/build_config.h"
namespace base {
namespace debug {
class GlobalActivityAnalyzer;
// This class provides analysis of data captured from a ThreadActivityTracker.
// When created, it takes a snapshot of the data held by the tracker and
// makes that information available to other code.
class BASE_EXPORT ThreadActivityAnalyzer {
public:
struct BASE_EXPORT Snapshot : ThreadActivityTracker::Snapshot {
Snapshot();
~Snapshot();
// The user-data snapshot for an activity, matching the |activity_stack|
// of ThreadActivityTracker::Snapshot, if any.
std::vector<ActivityUserData::Snapshot> user_data_stack;
};
// This class provides keys that uniquely identify a thread, even across
// multiple processes.
class ThreadKey {
public:
ThreadKey(ProcessId pid, int64_t tid) : pid_(pid), tid_(tid) {}
bool operator<(const ThreadKey& rhs) const {
if (pid_ != rhs.pid_)
return pid_ < rhs.pid_;
return tid_ < rhs.tid_;
}
bool operator==(const ThreadKey& rhs) const {
return (pid_ == rhs.pid_ && tid_ == rhs.tid_);
}
private:
ProcessId pid_;
int64_t tid_;
};
// Creates an analyzer for an existing activity |tracker|. A snapshot is taken
// immediately and the tracker is not referenced again.
explicit ThreadActivityAnalyzer(const ThreadActivityTracker& tracker);
// Creates an analyzer for a block of memory currently or previously in-use
// by an activity-tracker. A snapshot is taken immediately and the memory
// is not referenced again.
ThreadActivityAnalyzer(void* base, size_t size);
// Creates an analyzer for a block of memory held within a persistent-memory
// |allocator| at the given |reference|. A snapshot is taken immediately and
// the memory is not referenced again.
ThreadActivityAnalyzer(PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference);
ThreadActivityAnalyzer(const ThreadActivityAnalyzer&) = delete;
ThreadActivityAnalyzer& operator=(const ThreadActivityAnalyzer&) = delete;
~ThreadActivityAnalyzer();
// Adds information from the global analyzer.
void AddGlobalInformation(GlobalActivityAnalyzer* global);
// Returns true iff the contained data is valid. Results from all other
// methods are undefined if this returns false.
bool IsValid() { return activity_snapshot_valid_; }
// Gets the process id and its creation stamp.
ProcessId GetProcessId(int64_t* out_stamp = nullptr) {
if (out_stamp)
*out_stamp = activity_snapshot_.create_stamp;
return activity_snapshot_.process_id;
}
// Gets the name of the thread.
const std::string& GetThreadName() {
return activity_snapshot_.thread_name;
}
// Gets the ThreadKey for this thread.
ThreadKey GetThreadKey() {
return ThreadKey(activity_snapshot_.process_id,
activity_snapshot_.thread_id);
}
const Snapshot& activity_snapshot() { return activity_snapshot_; }
private:
friend class GlobalActivityAnalyzer;
// The snapshot of the activity tracker taken at the moment of construction.
Snapshot activity_snapshot_;
// Flag indicating if the snapshot data is valid.
bool activity_snapshot_valid_;
// A reference into a persistent memory allocator, used by the global
// analyzer to know where this tracker came from.
PersistentMemoryAllocator::Reference allocator_reference_ = 0;
};
// This class manages analyzers for all known processes and threads as stored
// in a persistent memory allocator. It supports retrieval of them through
// iteration and directly using a ThreadKey, which allows for cross-references
// to be resolved.
// Note that while each individual snapshot is taken atomically, the
// multi-snapshot as a whole is not, and so it may show small inconsistencies
// between threads if taken on a live system.
class BASE_EXPORT GlobalActivityAnalyzer {
public:
struct ProgramLocation {
int module;
uintptr_t offset;
};
using ThreadKey = ThreadActivityAnalyzer::ThreadKey;
// Creates a global analyzer from a persistent memory allocator.
explicit GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator);
GlobalActivityAnalyzer(const GlobalActivityAnalyzer&) = delete;
GlobalActivityAnalyzer& operator=(const GlobalActivityAnalyzer&) = delete;
~GlobalActivityAnalyzer();
// Creates a global analyzer using a given persistent-memory |allocator|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator);
#if !BUILDFLAG(IS_NACL)
// Creates a global analyzer using the contents of a file given in
// |file_path|.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithFile(
const FilePath& file_path);
#endif // !BUILDFLAG(IS_NACL)
// Like above but accesses an allocator in a mapped shared-memory segment.
static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemory(
base::ReadOnlySharedMemoryMapping mapping);
// Iterates over all known valid processes and returns their PIDs or zero
// if there are no more. Calls to GetFirstProcess() will perform a global
// snapshot in order to provide a relatively consistent state across
// future calls to GetNextProcess() and GetFirst/NextAnalyzer(). PIDs are
// returned in the order they're found, meaning that a first-launched
// controlling process will be found first. Note, however, that space
// freed by an exiting process may be re-used by a later process.
ProcessId GetFirstProcess();
ProcessId GetNextProcess();
// Iterates over all known valid analyzers for a given process, or returns
// null if there are no more.
//
// GetFirstProcess() must be called first in order to capture a global
// snapshot! Ownership stays with the global analyzer object and all existing
// analyzer pointers are invalidated when GetFirstProcess() is called.
ThreadActivityAnalyzer* GetFirstAnalyzer(ProcessId pid);
ThreadActivityAnalyzer* GetNextAnalyzer();
// Gets the analyzer for a specific thread or null if there is none.
// Ownership stays with the global analyzer object.
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
// Extract user data based on a reference and its identifier.
ActivityUserData::Snapshot GetUserDataSnapshot(ProcessId pid,
uint32_t ref,
uint32_t id);
// Extract the data for a specific process. An empty snapshot will be
// returned if the process is not known.
const ActivityUserData::Snapshot& GetProcessDataSnapshot(ProcessId pid);
// Gets all log messages stored within.
std::vector<std::string> GetLogMessages();
// Gets modules corresponding to a pid. This pid must come from a call to
// GetFirst/NextProcess. Only modules that were first registered prior to
// GetFirstProcess's snapshot are returned.
std::vector<GlobalActivityTracker::ModuleInfo> GetModules(ProcessId pid);
// Gets the corresponding "program location" for a given "program counter".
// This will return {0,0} if no mapping could be found.
ProgramLocation GetProgramLocationFromAddress(uint64_t address);
// Returns whether the data is complete. Data can be incomplete if the
// recording size quota is hit.
bool IsDataComplete() const;
private:
using AnalyzerMap =
std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
struct UserDataSnapshot {
// Complex class needs out-of-line ctor/dtor.
UserDataSnapshot();
UserDataSnapshot(const UserDataSnapshot& rhs);
UserDataSnapshot(UserDataSnapshot&& rhs);
~UserDataSnapshot();
ProcessId process_id;
int64_t create_stamp;
ActivityUserData::Snapshot data;
};
// Finds, creates, and indexes analyzers for all known processes and threads.
void PrepareAllAnalyzers();
// The persistent memory allocator holding all tracking data.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
// The time stamp when analysis began. This is used to prevent looking into
// process IDs that get reused when analyzing a live system.
int64_t analysis_stamp_;
// The iterator for finding tracking information in the allocator.
PersistentMemoryAllocator::Iterator allocator_iterator_;
// A set of all interesting memory references found within the allocator.
std::set<PersistentMemoryAllocator::Reference> memory_references_;
// A map of process IDs to snapshots of their process data.
std::map<ProcessId, UserDataSnapshot> process_data_;
// All process IDs collected during PrepareAllAnalyzers(). These are
// popped and returned one-by-one with calls to GetFirst/NextProcess().
std::vector<ProcessId> process_ids_;
// A map, keyed by ThreadKey, of all valid activity analyzers.
AnalyzerMap analyzers_;
// The iterator within the analyzers_ map for returning analyzers through
// first/next iteration.
AnalyzerMap::iterator analyzers_iterator_;
ProcessId analyzers_iterator_pid_;
};
} // namespace debug
} // namespace base
#endif // BASE_DEBUG_ACTIVITY_ANALYZER_H_
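For orientation, here is a minimal sketch of a client driving the iteration protocol declared above: capture the multi-snapshot with GetFirstProcess(), then walk each process's analyzers. Error handling is elided and the logging is illustrative.

#include <memory>

#include "base/debug/activity_analyzer.h"
#include "base/files/file_path.h"
#include "base/logging.h"

void DumpAllThreads(const base::FilePath& file_path) {
  std::unique_ptr<base::debug::GlobalActivityAnalyzer> global =
      base::debug::GlobalActivityAnalyzer::CreateWithFile(file_path);
  if (!global)
    return;
  // GetFirstProcess() captures the global snapshot; any analyzer pointers
  // from an earlier pass are invalidated here.
  for (base::ProcessId pid = global->GetFirstProcess(); pid != 0;
       pid = global->GetNextProcess()) {
    for (base::debug::ThreadActivityAnalyzer* analyzer =
             global->GetFirstAnalyzer(pid);
         analyzer != nullptr; analyzer = global->GetNextAnalyzer()) {
      LOG(INFO) << "pid " << pid << " thread " << analyzer->GetThreadName();
    }
  }
}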

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -14,6 +14,7 @@
#include <string.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
@ -300,6 +301,27 @@ void PrintToStderr(const char* output) {
std::ignore = HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output)));
}
#if BUILDFLAG(IS_LINUX)
void AlarmSignalHandler(int signal, siginfo_t* info, void* void_context) {
// We have seen rare cases on AMD Linux where the default signal handler
// either does not run or a thread (probably an AMD driver thread) prevents
// the termination of the GPU process. We catch this case when the alarm fires
// and then call exit_group() to kill all threads of the process. This has
// resolved the zombie GPU process issues we have seen in our context-lost
// test.
// Note that many different calls were tried to kill the process when it is
// in this state. Only 'exit_group' was found to cause termination, and it is
// speculated that this works because it is the only exit that kills all
// threads in the process, not simply the current thread.
// See: http://crbug.com/1396451.
PrintToStderr(
"Warning: Default signal handler failed to terminate process.\n");
PrintToStderr("Calling exit_group() directly to prevent timeout.\n");
// See: https://man7.org/linux/man-pages/man2/exit_group.2.html
syscall(SYS_exit_group, EXIT_FAILURE);
}
#endif // BUILDFLAG(IS_LINUX)
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
// NOTE: This code MUST be async-signal safe.
// NO malloc or stdio is allowed here.
@ -520,11 +542,27 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
PrintToStderr(
"Calling _exit(EXIT_FAILURE). Core file will not be generated.\n");
_exit(EXIT_FAILURE);
#endif // !BUILDFLAG(IS_LINUX)
#else // BUILDFLAG(IS_LINUX)
// After leaving this handler, control flow returns to the point where the
// signal was raised and the signal is raised once again, this time running
// the default handler instead of this one.
// Set an alarm to trigger in case the default handler does not terminate
// the process. See 'AlarmSignalHandler' for more details.
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_flags = static_cast<int>(SA_RESETHAND);
action.sa_sigaction = &AlarmSignalHandler;
sigemptyset(&action.sa_mask);
sigaction(SIGALRM, &action, nullptr);
// The 'alarm' function is async-signal-safe:
// https://man7.org/linux/man-pages/man7/signal-safety.7.html
// This delay is set to be long enough for the real signal handler to fire
// but shorter than Chrome's process watchdog timer.
constexpr unsigned int kAlarmSignalDelaySeconds = 5;
alarm(kAlarmSignalDelaySeconds);
#endif // !BUILDFLAG(IS_LINUX)
}
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
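The added handler and alarm form a general watchdog pattern: arm a one-shot SIGALRM before re-raising the fatal signal, so a wedged default handler cannot leave the process alive. A standalone sketch of the same idea (not the Chromium code; the five-second delay mirrors the constant above):

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

void AlarmHandler(int, siginfo_t*, void*) {
  // exit_group(2) terminates every thread in the process; per the comment
  // above it was the only call observed to terminate the wedged process.
  syscall(SYS_exit_group, EXIT_FAILURE);
}

void ArmTerminationFallback() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  action.sa_flags = SA_SIGINFO | SA_RESETHAND;  // One-shot handler.
  action.sa_sigaction = &AlarmHandler;
  sigemptyset(&action.sa_mask);
  sigaction(SIGALRM, &action, nullptr);
  // alarm() is async-signal-safe, so this may be called from a handler.
  alarm(5);
}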

View File

@ -668,7 +668,12 @@ FeatureList::OverrideState FeatureList::GetOverrideState(
const Feature& feature) const {
DCHECK(initialized_);
DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
DCHECK(CheckFeatureIdentity(feature)) << feature.name;
DCHECK(CheckFeatureIdentity(feature))
<< feature.name
<< " has multiple definitions. Either it is defined more than once in "
"code or (for component builds) the code is built into multiple "
"components (shared libraries) without a corresponding export "
"statement";
// If caching is disabled, always perform the full lookup.
if (!g_cache_override_state)

View File

@ -13,7 +13,7 @@ namespace base::features {
// backed by executable files.
BASE_FEATURE(kEnforceNoExecutableFileHandles,
"EnforceNoExecutableFileHandles",
FEATURE_DISABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
// Optimizes parsing and loading of data: URLs.
BASE_FEATURE(kOptimizeDataUrls, "OptimizeDataUrls", FEATURE_ENABLED_BY_DEFAULT);

View File

@ -75,9 +75,7 @@ class BASE_EXPORT FileDescriptorWatcher {
// Controller is deleted, ownership of |watcher_| is transferred to a delete
// task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
// deleted while it is being used by the MessageLoopForIO.
//
// TODO(crbug.com/1298696): Breaks base_unittests.
raw_ptr<Watcher, DanglingUntriagedDegradeToNoOpWhenMTE> watcher_;
raw_ptr<Watcher, DanglingUntriaged> watcher_;
// An event for the watcher to notify controller that it's destroyed.
// As the |watcher_| is owned by Controller, always outlives the Watcher.

View File

@ -350,9 +350,9 @@ class BASE_EXPORT FilePath {
// Returns a FilePath by appending a separator and the supplied path
// component to this object's path. Append takes care to avoid adding
// excessive separators if this object's path already ends with a separator.
// If this object's path is kCurrentDirectory, a new FilePath corresponding
// only to |component| is returned. |component| must be a relative path;
// it is an error to pass an absolute path.
// If this object's path is kCurrentDirectory ('.'), a new FilePath
// corresponding only to |component| is returned. |component| must be a
// relative path; it is an error to pass an absolute path.
[[nodiscard]] FilePath Append(StringPieceType component) const;
[[nodiscard]] FilePath Append(const FilePath& component) const;
[[nodiscard]] FilePath Append(const SafeBaseName& component) const;
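A short illustration of the behavior described above (paths invented):

#include <tuple>

#include "base/files/file_path.h"

void AppendExamples() {
  // A kCurrentDirectory path collapses away: the result is just "data".
  base::FilePath a =
      base::FilePath(FILE_PATH_LITERAL(".")).Append(FILE_PATH_LITERAL("data"));
  // An existing trailing separator is not doubled: "/tmp/log", not "/tmp//log".
  base::FilePath b = base::FilePath(FILE_PATH_LITERAL("/tmp/"))
                         .Append(FILE_PATH_LITERAL("log"));
  std::ignore = a;
  std::ignore = b;
}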

View File

@ -46,11 +46,11 @@ class BASE_EXPORT FilePathWatcher {
// within the directory are watched.
kRecursive,
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_APPLE)
// Indicates that the watcher should watch the given path only (neither
// ancestors nor descendants). The watch fails if the path does not exist.
kTrivial,
#endif // BUILDFLAG(IS_MAC)
#endif // BUILDFLAG(IS_APPLE)
};
// Flags are a generalization of |Type|. They are used in the new

View File

@ -390,16 +390,26 @@ bool IsPathSafeToSetAclOn(const FilePath& path) {
if (g_extra_allowed_path_for_no_execute) {
valid_paths.push_back(g_extra_allowed_path_for_no_execute);
}
// MakeLongFilePath is needed here because temp files can have an 8.3 path
// under certain conditions. See comments in base::MakeLongFilePath.
base::FilePath long_path = base::MakeLongFilePath(path);
DCHECK(!long_path.empty()) << "Cannot get long path for " << path;
for (const auto path_type : valid_paths) {
base::FilePath valid_path;
if (base::PathService::Get(path_type, &valid_path)) {
if (!base::PathService::Get(path_type, &valid_path)) {
DLOG(FATAL) << "Cannot get path for pathservice key " << path_type;
continue;
}
// Temp files can sometimes have an 8.3 path. See comments in
// base::MakeLongFilePath.
if (base::MakeLongFilePath(valid_path).IsParent(path)) {
base::FilePath full_path = base::MakeLongFilePath(valid_path);
DCHECK(!full_path.empty()) << "Cannot get long path for " << valid_path;
if (full_path.IsParent(long_path)) {
return true;
}
}
}
return false;
}
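Both sides of the comparison are run through MakeLongFilePath() because Windows may report either path in its short 8.3 form, and a prefix test across mixed forms fails even when one path really does contain the other. A hedged illustration (paths and the candidate_base variable are invented):

// "C:\Users\RUNNER~1\...\Temp" (8.3 form) and
// "C:\Users\runneradmin\...\Temp" (long form) name the same directory,
// yet FilePath::IsParent() compares components textually. Normalizing
// both sides first makes the containment test reliable:
base::FilePath long_path = base::MakeLongFilePath(path);
base::FilePath long_base = base::MakeLongFilePath(candidate_base);
bool contained = long_base.IsParent(long_path);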
@ -1100,9 +1110,7 @@ bool PreventExecuteMapping(const FilePath& path) {
return true;
}
// MakeLongFilePath is needed here because temp files can have an 8.3 path
// under certain conditions. See comments in base::MakeLongFilePath.
bool is_path_safe = IsPathSafeToSetAclOn(base::MakeLongFilePath(path));
bool is_path_safe = IsPathSafeToSetAclOn(path);
if (!is_path_safe) {
// To mitigate the effect of past OS bugs where attackers are able to use
@ -1137,7 +1145,7 @@ bool PreventExecuteMapping(const FilePath& path) {
// dangerous path is being passed to a renderer, which is inherently unsafe.
//
// If this check hits, please do not ignore it but consult the security team.
NOTREACHED() << "Unsafe to deny execute access to path : " << path;
DLOG(FATAL) << "Unsafe to deny execute access to path : " << path;
return false;
}

View File

@ -457,6 +457,16 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
created_ = true;
if (flags & FLAG_WIN_NO_EXECUTE) {
// These two DCHECKs make sure that no callers are trying to remove
// execute permission from a file that might need to be mapped executable
// later. If either fires, the file should not have the FLAG_WIN_NO_EXECUTE
// flag, but removing the flag means the file cannot be passed to renderers.
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".exe"),
path.Extension()));
DCHECK(!base::FilePath::CompareEqualIgnoreCase(FILE_PATH_LITERAL(".dll"),
path.Extension()));
// It is possible that the ACE could not be added if the file was created
// in a path for which the caller does not have WRITE_DAC access. In this
// case, ignore the error since if this is occurring then it's likely the

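For context, a minimal sketch of how a caller opts a renderer-bound file out of execute access with the flag checked above (the function name is invented):

#include "base/files/file.h"
#include "base/files/file_path.h"

base::File OpenForRenderer(const base::FilePath& path) {
  // FLAG_WIN_NO_EXECUTE asks for an ACE denying execute access; per the
  // DCHECKs above it must not be used on .exe or .dll paths.
  return base::File(path, base::File::FLAG_OPEN | base::File::FLAG_READ |
                              base::File::FLAG_WIN_NO_EXECUTE);
}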
Some files were not shown because too many files have changed in this diff