Compare commits

...

No commits in common. "545c85631134e23e79bfad22721dc310d0592191" and "97915a596d1e05b691a7d535ee0d36215ef2b064" have entirely different histories.

2839 changed files with 89778 additions and 135002 deletions
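
The notice above means the two commits share no common ancestor, so the listing below is effectively the full difference between two unrelated trees rather than an ordinary merge diff. A minimal sketch of checking this locally, assuming a clone that already contains both commits (the use of Python/subprocess here is illustrative and not part of the original page):

    import subprocess

    # `git merge-base` prints nothing and exits non-zero when the two commits
    # have no common ancestor -- i.e. "entirely different histories".
    result = subprocess.run(
        ["git", "merge-base",
         "545c85631134e23e79bfad22721dc310d0592191",
         "97915a596d1e05b691a7d535ee0d36215ef2b064"],
        capture_output=True,
        text=True,
    )
    print("no common ancestor" if result.returncode != 0 else result.stdout.strip())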

View File

@@ -1 +1 @@
105.0.5195.52
104.0.5112.79

View File

@@ -22,7 +22,6 @@ Abhinav Vij <abhinav.vij@samsung.com>
Abhishek Agarwal <abhishek.a21@samsung.com>
Abhishek Kanike <abhishek.ka@samsung.com>
Abhishek Singh <abhi.rathore@samsung.com>
Abin K Paul <abin.paul1@gmail.com>
Abul Hasan Md Osama <ahm.osama@samsung.com>
Adam Bonner <abonner-chromium@solscope.com>
Adam Bujalski <abujalski@gmail.com>
@@ -58,7 +57,6 @@ Alex Gaynor <alex.gaynor@gmail.com>
Alex Henrie <alexhenrie24@gmail.com>
Alex Scheele <alexscheele@gmail.com>
Alexander Douglas <agdoug@amazon.com>
Alexander Forrence <alex.forrence@gmail.com>
Alexander Guettler <alexander@guettler.io>
Alexander Rezepkin <etu@vivaldi.net>
Alexander Shalamov <alexander.shalamov@intel.com>
@@ -183,7 +181,6 @@ Brian Yip <itsbriany@gmail.com>
Brook Hong <hzgmaxwell@gmail.com>
Bruno Calvignac <bruno@flock.com>
Bruno de Oliveira Abinader <brunoabinader@gmail.com>
Bruno Pitrus <brunopitrus@hotmail.com>
Bruno Roy <brusi_roy@hotmail.com>
Bryan Donlan <bdonlan@gmail.com>
Bryce Thomas <bryct@amazon.com>
@@ -385,7 +382,6 @@ Gajendra Singh <wxjg68@motorola.com>
Ganesh Borle <ganesh.borle@samsung.com>
Gao Chun <chun.gao@intel.com>
Gao Chun <gaochun.dev@gmail.com>
Gao Yu <wanggao@tencent.com>
Gaurav Dhol <gaurav.dhol@einfochips.com>
Gautham Banasandra <gautham.bangalore@gmail.com>
George Adams <geoada@amazon.com>
@@ -475,7 +471,6 @@ Irmak Kavasoglu <irmakkavasoglu@gmail.com>
Isaac Murchie <murchieisaac@gmail.com>
Isaac Reilly <reillyi@amazon.com>
Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com>
Jack Bates <jack@nottheoilrig.com>
Jacky Hu <flameddd@gmail.com>
@@ -517,7 +512,6 @@ Jay Soffian <jaysoffian@gmail.com>
Jeado Ko <haibane84@gmail.com>
Jeffrey C <jeffreyca16@gmail.com>
Jeffrey Yeung <jeffrey.yeung@poly.com>
Jelle Bleyaert <jellebley@gmail.com>
Jeong A Shin <jeonga@khu.ac.kr>
Jeongeun Kim <je_julie.kim@samsung.com>
Jeongmin Kim <kimwjdalsl@gmail.com>
@@ -706,7 +700,6 @@ Lenny Khazan <lenny.khazan@gmail.com>
Leo Wolf <jclw@ymail.com>
Leon Han <leon.han@intel.com>
Leung Wing Chung <lwchkg@gmail.com>
Li Yanbo <liyanbo.monster@bytedance.com>
Li Yin <li.yin@intel.com>
Lidwine Genevet <lgenevet@cisco.com>
Lin Sun <lin.sun@intel.com>
@@ -914,7 +907,6 @@ Pedro Tôrres <t0rr3s.p3dr0@gmail.com>
Peng Hu <penghu@tencent.com>
Peng Jiang <leiyi.jp@gmail.com>
Peng Xinchao <pxinchao@gmail.com>
Peng Zhou <zhoupeng.1996@bytedance.com>
Peng-Yu Chen <pengyu@libstarrify.so>
Pei Wang <wangpei@uniontech.com>
Peter Bright <drpizza@quiscalusmexicanus.org>
@@ -946,7 +938,6 @@ Praveen Akkiraju <praveen.anp@samsung.com>
Preeti Nayak <preeti.nayak@samsung.com>
Pritam Nikam <pritam.nikam@samsung.com>
Puttaraju R <puttaraju.r@samsung.com>
Qi Tiezheng <qitiezheng@360.cn>
Qi Yang <qi1988.yang@samsung.com>
Qiang Zeng <zengqiang1@huawei.com>
Qiankun Miao <qiankun.miao@intel.com>
@@ -1149,12 +1140,10 @@ Sungmann Cho <sungmann.cho@navercorp.com>
Sunil Ratnu <sunil.ratnu@samsung.com>
Sunitha Srivatsa <srivats@amazon.com>
Sunwoo Nam <jegalzz88@gmail.com>
Suresh Guttula <suresh.guttula@amd.corp-partner.google.com>
Surya K M <suryagowda590@gmail.com>
Sushma Venkatesh Reddy <sushma.venkatesh.reddy@intel.com>
Suvanjan Mukherjee <suvanjanmukherjee@gmail.com>
Suyambulingam R M <suyambu.rm@samsung.com>
Suyash Nayan <suyashnyn1@gmail.com>
Suyash Sengar <suyash.s@samsung.com>
Swarali Raut <swarali.sr@samsung.com>
Swati Jaiswal <swa.jaiswal@samsung.com>
@@ -1170,7 +1159,6 @@ Taehoon Lee <taylor.hoon@gmail.com>
Taeseong Yu <yugeeklab@gmail.com>
Taeyeon Kim <ssg9732@gmail.com>
Tae Shin <taeshindev@gmail.com>
Taher Ali <taher.dasten@gmail.com>
Takaaki Suzuki <takaakisuzuki.14@gmail.com>
Takahiro Aoyagi <hogehoge@gachapin.jp>
Takashi Fujita <tgfjt.mail@gmail.com>
@@ -1341,7 +1329,6 @@ Zheng Xu <zxu@kobo.com>
Zhengkun Li <zhengkli@amazon.com>
Zhenyu Liang <zhenyu.liang@intel.com>
Zhenyu Shan <zhenyu.shan@intel.com>
Zhibo Wang <zhibo1.wang@intel.com>
Zhifei Fang <facetothefate@gmail.com>
Zhiyuan Ye <zhiyuanye@tencent.com>
Zhuoyu Qian <zhuoyu.qian@samsung.com>
@@ -1396,7 +1383,6 @@ Loongson Technology Corporation Limited. <*@loongson.cn>
Macadamian <*@macadamian.com>
Mail.ru Group <*@corp.mail.ru>
Make Positive Provar Limited <*@provartesting.com>
Mc Zeng <zengmcong@gmail.com>
Mediatek <*@mediatek.com>
Microsoft <*@microsoft.com>
MIPS Technologies, Inc. <*@mips.com>

src/DEPS (336 changed lines)
View File

@@ -144,19 +144,6 @@ vars = {
# tools/clang/OWNERS before depending on it.
'checkout_clang_libs': 'use_rust',
# Fetch prebuilt and prepackaged Bazel binary/executable. Bazel is currently
# only needed by `chromium/src/tools/rust/build_crubit.py` and therefore
# shouldn't be used outside of Chromium Rust Experiments project.
# Furthermore note that Bazel is only needed when building Crubit during Rust
# toolchain build (and is *not* needed during regular Chromium builds).
'checkout_bazel': False,
# Fetch Crubit support libraries in order to build ..._rs_api.rs and
# ..._rs_api_impl.cc that are generated by prebuilt (see
# tools/rust/build_crubit.py) Crubit tools during Chromium build (see
# also //build/rust/rs_bindings_from_cc.gni).
'checkout_crubit': 'use_rust',
# By default checkout the OpenXR loader library only on Windows. The OpenXR
# backend for VR in Chromium is currently only supported for Windows, but
# support for other platforms may be added in the future.
@@ -197,12 +184,6 @@ vars = {
# qemu on linux-arm64 machines.
'checkout_fuchsia_for_arm64_host': False,
# Revision of Crubit (trunk on 2022-07-14). This should typically be the
# same as the revision specified in CRUBIT_REVISION in
# tools/rust/update_rust.py. More details and roll instructions can be
# found in tools/rust/README.md.
'crubit_revision': 'd9b0ad4c09b46328dcc7a5ec28ce86cca56e0389',
# Run 'vpython_common' hook if this is set.
# TODO(crbug.com/1329052): remove this when we remove .vpython.
'enable_vpython_common_crbug_1329052': True,
@@ -233,7 +214,7 @@ vars = {
#
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '14977.0.0',
'lacros_sdk_version': '14844.0.0',
# Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates
@@ -245,7 +226,7 @@ vars = {
# luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:a0ba80649473055bae3d789eec28c9967adb5e45',
'luci_go': 'git_revision:de014227dd270df7c61bfab740eb4ae4b52ac2a7',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision.
@@ -269,9 +250,6 @@ vars = {
# for info on RTS
'checkout_rts_model': False,
# Use the experimental version of the RTS model
'checkout_rts_experimental_model': False,
# By default, do not check out the re-client binaries.
'checkout_reclient': False,
@@ -279,7 +257,7 @@ vars = {
'dawn_standalone': False,
# reclient CIPD package version
'reclient_version': 're_client_version:0.72.0.b874055-gomaip',
'reclient_version': 're_client_version:0.68.0.2c94334-gomaip',
# Enable fetching Rust-related packages.
'use_rust': False,
@@ -297,34 +275,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': 'f204b137b97b44b7397de173fc54181c37ac6501',
'skia_revision': '5a4dbb2e97302f7e574f0ba962ac4d8b3a62a36e',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': 'b1f56b4a8a7cf9f707f7966104278777f9994b13',
'v8_revision': '3d67ad243ce92b9fb162cc85da1dc1a0ebe4c78b',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': '2f0d8ab049b10ee41f9b90cea8da8e80db076e38',
'angle_revision': '2693b03eba82a424a19febaacaab4115a45b7682',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': '16e026a959f1bc80ff237aa81b4a63b52517dec1',
'swiftshader_revision': 'd68d367047f562e0e0716fd72b9a2bc3e2c8f4c0',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': 'd14da8e682e244127db32490365d1c094243e5f3',
'pdfium_revision': 'c7c276ce1192f043affb2098ac7ce44f7fd7f084',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': 'b95124305ab15c7523d3e21437309fa5dd717ee8',
'boringssl_revision': '1e469e45a46ff580899cbef939babe02ad916c85',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:9.20220720.2.1',
'fuchsia_version': 'version:8.20220609.0.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@@ -340,23 +318,23 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling lss
# and whatever else without interference from each other.
'lss_revision': '0d6435b731ef91d5182eaecff82ae96764222c48',
'lss_revision': '1d387f43f3702818f7fc04d334539a2f05cafccd',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other.
'nacl_revision': '18d9964d47fc44f49a4c19b7ba91197ddca00c6a',
'nacl_revision': '77f46a8ec552f316f64aaa82b7f06da9fceb036b',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': '275b116b40c9d183d42242099ea9ff276985855b',
'freetype_revision': 'b11074cf6dce78d0bc79ff7996dec70ca3abe4a9',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_testing_revision': '1ca0c99d25ae3b1e4c70513c1bf74643fc3dee09',
'freetype_testing_revision': 'fe2eddae6176f75e2101289eeda22a5ff3d808ca',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling HarfBuzz
# and whatever else without interference from each other.
'harfbuzz_revision': 'fa471043fccb94444510e3300ac2573297c82137',
'harfbuzz_revision': 'c88a6a9ec3c38793ec8b662362282e076e948943',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Emoji Segmenter
# and whatever else without interference from each other.
@@ -364,11 +342,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling OTS
# and whatever else without interference from each other.
'ots_revision': '46bea9879127d0ff1c6601b078e2ce98e83fcd33',
'ots_revision': 'ee537ac096667eed6559124164c3e8482646fd77',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': 'b6e934ef32e6591ad60636e3fe167d0e3e9aa5d4',
'catapult_revision': '8a8c0b9c3967e17da1d9f64b3d57e4a969b465a7',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
@@ -376,11 +354,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': 'b1ac4239dc5fffa56170e7367a03f35d2eaa223c',
'devtools_frontend_revision': 'ed8fe6a3c528058bcc8e95b9071be6d6cb1b45f2',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
'libprotobuf-mutator': 'a304ec48dcf15d942607032151f7e9ee504b5dcf',
'libprotobuf-mutator': '8942a9ba43d8bb196230c321d46d6a137957a719',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
# and whatever else without interference from each other.
@@ -412,11 +390,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': '1e98a9ba4a64301e0ab932e22ce989688f6cdf6c',
'dawn_revision': '2d334ed4f01b00211acd0360a5f9dc91d83354cb',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': '53c94d968dd6a0cf748caf42462a0b676f95530c',
'quiche_revision': '26920de2d369289246585fa1b236ef2ef516d317',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@@ -428,7 +406,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling wuffs
# and whatever else without interference from each other.
'wuffs_revision': 'a8205c2fe7564b12fea81ee028ba670112cc7719',
'wuffs_revision': 'ebbecaa2fb439eff0aeedafadb4c2a984446dee8',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libgifcodec
# and whatever else without interference from each other.
@@ -436,11 +414,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other.
'libavif_revision': 'dd2d67c5f976038354c0406a253e26dd2abc4632',
'libavif_revision': '1dedea43710c55a1bf1c675126cb6cb546c7b2a7',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': 'd2c401112cc577fe3c5f9a11329bb557048af31a',
'nearby_revision': 'd8453565db739ebbfbbeade9ec8f8239057b1e13',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@@ -456,15 +434,15 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': 'b954e3e65634a9e2f7b595598a30c455f5f2eb26',
'libcxxabi_revision': '013bcd820a353ec1d3ff472f2971553fea7232cf',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': '955e2ff5fbb15791fea263c1c80e1ec6b3c5ee61',
'libunwind_revision': '1644d070bf374182739543c3a12df88dfec0b88a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'clang_format_revision': '8b525d2747f2584fc35d8c7e612e66f377858df7',
'clang_format_revision': 'e435ad79c17b1888b34df88d6a30a094936e3836',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@@ -473,17 +451,13 @@ vars = {
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'highway_revision': '424360251cdcfc314cfc528f53c872ecd63af0f0',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': '880df5ede50a8534c8116d0d50e4bc4f3ef08a06',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': '88bf4070487fbe9020697a2281743b91e5e29bef',
'libcxx_revision': 'b1269813eaf5b8ac78e35e45a0f7cc320bd3e7d6',
# GN CIPD package version.
'gn_version': 'git_revision:9ef321772ecc161937db69acb346397e0ccc484d',
'gn_version': 'git_revision:2ecd43a10266bd091c98e6dcde507c64f6a0dad3',
}
# Only these hosts are allowed for dependencies in this DEPS file.
@@ -564,7 +538,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_rust_toolchain/linux-amd64',
'version': 'rMU9JFlwRfB-5VEWgDPRFYme5sXSnsHPSE3uQXf1xBQC',
'version': 'BKAbvHjGv4-os-v8MS3I54bPsY-397xgaJ3yBeIVS20C',
},
],
'dep_type': 'cipd',
@@ -634,7 +608,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': 'RBRqNIwSXHjdZf4BVjWk8enaKIHw58aQGFDVNozlbWIC',
'version': '3UoNd4X57YAONz6mhQPapIUoO1Ds6K5AIHTAZxNmLUUC',
},
],
},
@@ -645,7 +619,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'tqlS-vFYsn2LVSJMwipq84EKLmwKa1XJb760NnpQL2gC',
'version': 'vTjr7sgsZvG680Qx0nhiR3H4-EAJDdh5m1BkVbbuER0C',
},
],
},
@@ -656,7 +630,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'RthX5RzppvSV7uq2P6pm2bnv6-dvoHpUIOsZFk57ZMEC',
'version': 'dQu9E9-wPljBkp6aaMxzCt9WJdBI_u_w2N1VRS2Mdf4C',
},
],
},
@@ -668,7 +642,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': '7MvxvS-pmZP1iAXQoCiLI7nv4UkDiyw8PC1ycwpYWbYC',
'version': '1RmnK4JbmordT5NJbib3mfagpHvrO-oJaTW4HKsYgmAC',
},
],
},
@@ -680,7 +654,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'd5lN7fzV07O4-Mu_T8TshrGQtlR9F508p9cdhchcLpYC',
'version': '-o_WPHLr3JzvHdsTl_E0AgEV2D2--sBNqVT_F74hvOIC',
},
],
},
@@ -691,7 +665,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': '8zehx-DVmaf_FplPe23acLAStf3Z7anQ3CY9LXBfvD0C',
'version': 'zdcQMuiuIXnQQ4eBsYyk0uNTr4N0LaphcQYs0RUByZQC',
},
],
},
@@ -702,7 +676,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'KE8JnjZFOyHxUhVdRkm0IMVqlZIaYPnAOI-zxtUD4zUC',
'version': '77COFCYoGCHrnvDJZFC8GoYea4B8GeTVWNKpUZBCwe4C',
},
],
},
@@ -773,16 +747,16 @@ deps = {
Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '3965ba67f8d283378e6c0b64d634b91fb830a378',
'url': Var('chromium_git') + '/website.git' + '@' + 'ffb0b23dc2833849a0b1e57f25caba219e717963',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '53a2982c85ac6cf802719603d037ad3be7091ebb',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '16126e61e3b831b005a91d83cecefd54628530bb',
'condition': 'checkout_ios',
},
'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '3d3dcee71993376f3abcf3457b046c1df6c13182',
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '07ef2f35406e9f04cf86d11da5cc62d58f36cb15',
'condition': 'checkout_ios',
},
@@ -791,8 +765,13 @@ deps = {
'condition': 'checkout_ios',
},
'src/ios/third_party/gcdwebserver/src': {
'url': Var('chromium_git') + '/external/github.com/swisspol/GCDWebServer.git' + '@' + '43555c66627f6ed44817855a0f6d465f559d30e0',
'condition': 'checkout_ios',
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '425d641798c86ab809fcb067bbb265958756af98',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '8225446a8df15e53e96304852f134259e0e5e2df',
'condition': 'checkout_ios',
},
@@ -817,7 +796,7 @@ deps = {
},
'src/ios/third_party/material_text_accessibility_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-foundation/material-text-accessibility-ios.git' + '@' + '8cd910c1c8bbae261ae0d7e873ed96c69a386448',
'url': Var('chromium_git') + '/external/github.com/material-foundation/material-text-accessibility-ios.git' + '@' + '197375261e25ee5d473219d0f353a1f635f5393d',
'condition': 'checkout_ios',
},
@@ -862,7 +841,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'XlzIsX8AH06QHVAMzpKt5aT3nfupjnBr78ztG18pXdsC',
'version': 'gpPYMz2bs2vn_NVI9vEIw6a1w2oQ7ck6CsoRISA8fgoC',
},
],
'dep_type': 'cipd',
@@ -873,7 +852,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'CPhzNoasDtJ45F8bwTLs7lIQDiy-PhdReFmXrlL5FDoC',
'version': 'swy7IasO6Zn8zMTIWXP3ixCvml7S_2vUby60ZuPlFdEC',
},
],
'dep_type': 'cipd',
@@ -884,24 +863,13 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'XmZtpYZGxTqwBMgEXpdyfrdCxx79QfYeVlhpviF2OUcC',
'version': 'eIvALKHzZkCX0uLI5LLb8fpo2ckXFiu_dh4_8SdQyE8C',
},
],
'dep_type': 'cipd',
'condition': 'checkout_rts_model and checkout_win',
},
'src/testing/rts/rts-ml-chromium/linux-amd64': {
'packages': [
{
'package': 'experimental/google.com/sshrimp/chromium/rts/model/linux-amd64',
'version': '3K1dz8hGV_xBeEcPKmXfrPYWCwXdRf6KVVLrg7AuJ0sC',
},
],
'dep_type': 'cipd',
'condition': 'checkout_rts_experimental_model and checkout_linux',
},
'src/tools/luci-go': {
'packages': [
{
@@ -952,7 +920,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': 'gAsD4l8EoP_W0IH5vzedZ1tyN3-wAP8-fqkaS_mX6rcC',
'version': 'rMO5RMLXC5cyz-Lk9zE9bV66Gc8ggeO7EPkCxYXNO2oC',
},
],
'condition': 'checkout_android',
@@ -985,7 +953,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/aapt2',
'version': 'hf9C5IyJUUGaBnzqu60xiFJSyfAmjqjc_PiNXNVc9l0C',
'version': 'jziPmg_EUjoj-eSkO24eMaf5ylm449Q7BKO1-ga2tbgC',
},
],
'condition': 'checkout_android',
@@ -996,29 +964,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'qLkNwA6wjoqznVqaa151GelgGBP4X495n0z-jluACPcC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_build_tools/lint': {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'INnGGTfg5gGJutJiBtWI6-QwusHDDnKvZzI53Q3UiecC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_build_tools/manifest_merger': {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': '0WkAedh1tJB8lzisWJRT80UjpacKLltuV7NqP-0tx9gC',
'version': 'AqsPZpWJh-ZyGraHKlbH8XgjRnmyDmolX4HhwPEo9XUC',
},
],
'condition': 'checkout_android',
@@ -1058,7 +1004,7 @@ deps = {
},
{
'package': 'chromium/third_party/android_sdk/public/cmdline-tools',
'version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C',
'version': 'PGPmqJtSIQ84If155ba7iTU846h5WJ-bL5d_OoUWEWYC',
},
],
'condition': 'checkout_android_native_support',
@@ -1069,7 +1015,7 @@ deps = {
Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
'src/third_party/content_analysis_sdk/src':
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '1d7dd0490808a8a972949521cc314e42d085c69f',
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'd2a0b6188bcbae674f8ef2c42c7cffc908ac632e',
'src/third_party/dav1d/libdav1d':
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@@ -1101,6 +1047,17 @@ deps = {
'src/third_party/boringssl/src':
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/bouncycastle': {
'packages': [
{
'package': 'chromium/third_party/bouncycastle',
'version': 'c078e87552ba26e776566fdaf0f22cd8712743d0',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'c4c43b80ea8854c57a4374ac32579b577172dc23',
@@ -1127,7 +1084,7 @@ deps = {
},
'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + '8ba5ff47563d0ca8233e8fa009377ed14a560cf4',
Var('chromium_git') + '/cast_core/public' + '@' + '6c053df4fe5aea168ca651e2d95dc5dc40ebe059',
'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@@ -1156,7 +1113,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '60350fcfeb0bca00eba2794f53661a1b996a79a5',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '41d2e02e8d1438445eee7463ae4536fe6280e0fc',
'condition': 'checkout_chromeos',
},
@@ -1174,17 +1131,12 @@ deps = {
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'f3b1373caf7bd717be4f0d21ab8c738c6bfcf418',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '315a946dfdf2b0d850e35ec2dbca7ad45525c6ff',
'condition': 'checkout_linux',
},
'src/third_party/crubit/src': {
'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'),
'condition': 'checkout_crubit',
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '0ba2fd429dd6db431fcbee6995c1278d2a3657a0',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a4b3602457c422f8ed054ff3673baaecedc2972a',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
@@ -1193,7 +1145,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '0e187141679fdb91da33249d18cb79a011c0e2ea',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b02c384ef4e8eba7b8bdef16f9dc6f8f4d6a6b2b',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
@@ -1215,13 +1167,13 @@ deps = {
Var('chromium_git') + '/external/github.com/google/farmhash.git' + '@' + '816a4ae622e964763ca0862d9dbd19324a1eaf45',
'src/third_party/ffmpeg':
Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + Var('ffmpeg_revision'),
Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + 'abfc2628f25d283c27ffc960a8ff820ae8110467',
'src/third_party/flac':
Var('chromium_git') + '/chromium/deps/flac.git' + '@' + 'af862024c8c8fa0ae07ced05e89013d881b00596',
'src/third_party/flatbuffers/src':
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + '0fe13cb28ce5a3fb81f654b21cb37c9821194962',
Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'b8aaccee8248059b2af032cca0eb1d2ddbdb6cdc',
# Used for embedded builds. CrOS & Linux use the system version.
'src/third_party/fontconfig/src': {
@@ -1236,7 +1188,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',
'src/third_party/grpc/src': {
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + '89f7534e43cf73f56c492a9cf7eb85ca6bfbd87a',
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + 'd1338d8751231bdc0d87e732d25420e87d24cffd',
},
'src/third_party/freetype/src':
@@ -1258,7 +1210,7 @@ deps = {
Var('chromium_git') + '/external/github.com/khaledhosny/ots.git' + '@' + Var('ots_revision'),
'src/third_party/libgav1/src':
Var('chromium_git') + '/codecs/libgav1.git' + '@' + 'cd53f7c0d6a1c005e38874d143c8876d375bae70',
Var('chromium_git') + '/codecs/libgav1.git' + '@' + '38ca0c62d9079820bbb872d81738770c0a28ae6d',
'src/third_party/google_toolbox_for_mac/src': {
'url': Var('chromium_git') + '/external/github.com/google/google-toolbox-for-mac.git' + '@' + Var('google_toolbox_for_mac_revision'),
@@ -1296,7 +1248,7 @@ deps = {
},
'src/third_party/arcore-android-sdk/src': {
'url': Var('chromium_git') + '/external/github.com/google-ar/arcore-android-sdk.git' + '@' + 'd197af6b38f98d2344bc7f76326c4aa0c4f2fb90',
'url': Var('chromium_git') + '/external/github.com/google-ar/arcore-android-sdk.git' + '@' + 'eaa85941f2d724c60671bf94f46de7178baba7e6',
'condition': 'checkout_android',
},
@@ -1304,7 +1256,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/arcore-android-sdk-client',
'version': 'NYf1qvBS9hNVK-6exAl-MaVbQsRKCDYl5Br936GtcU0C',
'version': 'fUSZ4jxIhIx34TxRIcrmOu76-khcka_Gpn0_t9lKCWQC',
},
],
@@ -1338,7 +1290,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '6e558942cc8b83e525bdabaf987e06af8a377314',
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '12de966fcbe1d1a48dba310aee63807856ffeee8',
'src/third_party/icu4j': {
'packages': [
@@ -1406,10 +1358,10 @@ deps = {
Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
'src/third_party/libaddressinput/src':
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'a56be6f77ea76a9e77561ca306acede244631610',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '8dcdafc6d4a2f9f8ea8104f26eca5d123eefcb7f',
Var('aomedia_git') + '/aom.git' + '@' + '32d6783c4c9f6e78f43bd4ceffaa0eb2143aed57',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1442,7 +1394,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '22f1a22c99e9dde8cd3c72ead333f425c5a7aa77',
'src/third_party/liblouis/src': {
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + 'c05f3bfb0990434bd12bf6697d16ed943f2203c2',
'condition': 'checkout_linux',
},
@@ -1467,16 +1419,13 @@ deps = {
},
'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + '711bef67400f096416cb1ba7f6560e533871490f',
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'ca89bed50dbc5fe2abef50c5f36924bb1da6d1f6',
'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
'src/third_party/libwebp/src':
Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + 'd248929c059ff7629a85333699717d7a677d8d96',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '30f9b280487be412da346aecce7834275020976e',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1567,7 +1516,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '6be6b78224a276e908b8272542d125e133c40f3f',
Var('chromium_git') + '/openscreen' + '@' + '1ea464c1b10821e39dd487b6f9e4813c5c8546b0',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1584,7 +1533,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + '361efbf9aab595e4dfa79ec48f242d9e722393c9',
Var('android_git') + '/platform/external/perfetto.git' + '@' + 'a6b4d9563ec37e531ebcc21912c3078ba4c2b367',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1620,8 +1569,8 @@ deps = {
'src/third_party/qemu-linux-arm64': {
'packages': [
{
'package': 'fuchsia/third_party/qemu/linux-arm64',
'version': 'BpnoBb2d44_SOm9toN6Lju5a2RLGAc1TPUO6xyijoP8C'
'package': 'fuchsia/qemu/linux-arm64',
'version': 'b1b61a39e3ab0935cd030f27e01740578b04b967'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia and checkout_fuchsia_for_arm64_host',
@@ -1635,7 +1584,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'auReXfxxD74XGdPdi-rYsKrp4sRwYwgNjh_W0PT7vNcC',
'version': 'iMLEt10uXASDfG2AlATR1fO8xYhBoF24nQvDDXLY6Q8C',
},
],
'condition': 'checkout_android',
@@ -1659,7 +1608,7 @@ deps = {
},
'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '72155b3185246e9143f4c6a3a7f283d2ebba8524',
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '7ef39c5745a61f43071e699c6a96da41701ae59f',
'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@@ -1671,7 +1620,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'e6b63421941617bf5ccac6b4a62d7a7b4a2c3fef',
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'cb47d7089f714e4514f126dfa8ac630cab78ea32',
'src/third_party/sqlite4java': {
'packages': [
@@ -1702,20 +1651,25 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ac31ffa987c14665062c00f98ec025a3fdc185ab',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '5ddefa9f8d1455c8ca694ba1a511ba82e3a88960',
'src/third_party/turbine': {
'packages': [
{
'package': 'chromium/third_party/turbine',
'version': 'RxaW8ZVJZ7rF3dbAGbL1OV6f9ZELjVeiiQbpkDsuxA4C',
'version': 'A5bkaoLBc1JYa6Xv31jYUCK5TnXYCbxRrA4FBPXXcWgC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@c42337d9ef75170244486b580bad7dfe78447bfd',
'src/third_party/ub-uiautomator/lib': {
'url': Var('chromium_git') + '/chromium/third_party/ub-uiautomator.git' + '@' + '00270549ce3161ae72ceb24712618ea28b4f9434',
'condition': 'checkout_android',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@12989fc8d7d273b5b95ef988d37469738eb9c39b',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@@ -1748,13 +1702,13 @@ deps = {
Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'd0045ec570c1a77612db35d1e92f05e1d27b4d53',
'src/third_party/webgl/src':
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + '44e4c8770158c505b03ee7feafa4859d083b0912',
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'bb289ce3cb15bbabd42fdcb01439367846d9069d',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'c4eb1df3f306c0ee3e43ba2446eb3616e42d6855',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '84183b561b7f44b2eae6a5f268d5085accb0ad7a',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'dc5cf31cad576376abd3aa6306169453cfd85ba5',
Var('webrtc_git') + '/src.git' + '@' + '06aea31d10f860ae4236e3422252557762d39188',
'src/third_party/libgifcodec':
Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
@@ -1775,7 +1729,7 @@ deps = {
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a33b227047def29b79853ef688b6dda6c6fc5386',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'da533e0114f2bf730f17853ae10556d84a3d1e89',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1784,7 +1738,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'BquSeorcTU84O2_A8IoWetGrcfLWxLfZCo9sve1Wt2IC',
'version': 'l0eo0uR9oMGZFKDucYLGk3vdNGAunP4tGrDhIfzljdcC',
},
],
'dep_type': 'cipd',
@@ -1794,7 +1748,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': 'AOoQr1u4-cOIEYJDAgVxGWoTiPaRcjrSsjjAaB-u_ggC',
'version': '7YFnDYPdgUOTWkaZe19LzWhmwWBuVMM9Q02sTDQtlkgC',
},
],
'dep_type': 'cipd',
@@ -1805,7 +1759,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': '-t3YY_sZ-jtMAYZ2PlhjudFnEUgk4m-HjlIwSip4tOAC',
'version': '4shFavk40_bOJI9zUzvH2PaMW7tHKuxeEN0zik75F7sC',
},
],
'dep_type': 'cipd',
@@ -1816,7 +1770,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'x_xKUnqrgizoTO8mxX4RkyhpQ-nUp_x_go9YH-tc--QC',
'version': 'Ynx3axT8ZDN7Nl5pNwI5w8PCCt7jax5FGf8jcyjUzt4C',
},
],
'dep_type': 'cipd',
@@ -1827,7 +1781,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@c29563eac12bc062b66805a4766673729ce7d4ef',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@4ff96f70fd1b507f17579577080637625e6e2ef8',
'condition': 'checkout_src_internal',
},
@@ -1846,7 +1800,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'PEjYa5GVISxpuqCZfq9pZ3QeSWhNtWSdQ6gmJ8bizQ0C',
'version': '3AnrwGQwN1GPv9gbT4wn-9ScZNzXc5x6e68tt5yFI30C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1857,7 +1811,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': 'MqUROEBmHZCBRsEY3abQ7JOvoDr5wZ_MTK3vAN-901wC',
'version': 'Vmw_AXRtISh_bHtp8evOfKkTHoF7PksGw7-UBEamlI0C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1868,7 +1822,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'tV1aN61vvzGiDSJgQxN_namEG8pvO6RTuO-qbQMC51IC',
'version': 'XXOw6zqfIbq0PGDfk6FNetf4Bsym6nYVy42v7R5n2AoC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1879,7 +1833,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': 'Eeqz2JXdGXA3-P7iu9xSzSc3iyUuAruoN1W3-FplxR4C',
'version': 'C5cTyNZYNM9JtGlk_csAXrSxTsH8Mk2vTIU2dEfjRskC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@@ -3206,17 +3160,6 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on',
'version': 'version:2@1.68.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup': {
'packages': [
{
@ -3309,7 +3252,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.7.0.cr1',
'version': 'version:2@1.6.21.cr1',
},
],
'condition': 'checkout_android',
@@ -3320,7 +3263,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.7.0.cr1',
'version': 'version:2@1.6.21.cr1',
},
],
'condition': 'checkout_android',
@@ -3624,15 +3567,6 @@ deps = {
],
'dep_type': 'cipd',
},
'src/tools/bazel': {
'packages': [{
'package': 'infra/3pp/tools/bazel_bootstrap/${{platform}}',
'version': 'version:2@5.2.0.1',
}],
'dep_type': 'cipd',
'condition': 'checkout_bazel',
},
}
@@ -3666,7 +3600,6 @@ include_rules = [
'-absl',
'-third_party/abseil-cpp',
'+third_party/abseil-cpp/absl/base/attributes.h',
"+third_party/abseil-cpp/absl/functional/function_ref.h",
"+third_party/abseil-cpp/absl/numeric/int128.h",
'+third_party/abseil-cpp/absl/types/optional.h',
'+third_party/abseil-cpp/absl/types/variant.h',
@@ -3686,6 +3619,20 @@ skip_child_includes = [
hooks = [
# Download and initialize "vpython" VirtualEnv environment packages for
# Python2. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and
# don't run slower than they might otherwise need to.
{
'name': 'vpython_common',
'pattern': '.',
# TODO(https://crbug.com/1205263): Run this on mac/arm too once it works.
'condition': 'not (host_os == "mac" and host_cpu == "arm64") and enable_vpython_common_crbug_1329052',
'action': [ 'vpython',
'-vpython-spec', 'src/.vpython',
'-vpython-tool', 'install',
],
},
# Download and initialize "vpython" VirtualEnv environment packages for
# Python3. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and
@@ -3942,14 +3889,6 @@ hooks = [
'-s', 'src/third_party/skia',
'--header', 'src/skia/ext/skia_commit_hash.h'],
},
{
# Update dawn_version.h.
'name': 'lastchange_dawn',
'pattern': '.',
'action': ['python3', 'src/build/util/lastchange.py',
'-s', 'src/third_party/dawn',
'--revision', 'src/gpu/webgpu/DAWN_VERSION'],
},
# Pull dsymutil binaries using checked-in hashes.
{
'name': 'dsymutil_mac_arm64',
@@ -4508,16 +4447,6 @@ hooks = [
],
},
{
'name': 'Download Fuchsia SDK from GCS',
'pattern': '.',
'condition': 'checkout_fuchsia',
'action': [
'python3',
'src/build/fuchsia/override_sdk.py',
],
},
{
'name': 'Download Fuchsia system images',
'pattern': '.',
@@ -4754,17 +4683,6 @@ hooks = [
],
'condition': 'generate_location_tags',
},
{
# Clean up build dirs for crbug.com/1337238.
# After a libc++ roll and revert, .ninja_deps would get into a state
# that breaks Ninja on Windows.
# TODO(crbug.com/1337238): Remove in a month or so.
'name': 'del_ninja_deps_cache',
'pattern': '.',
'condition': 'host_os == "win"',
'action': ['python3', 'src/build/del_ninja_deps_cache.py'],
},
]
# Add any corresponding DEPS files from this list to chromium.exclusions in

View File

@@ -159,13 +159,6 @@ buildflag_header("ios_cronet_buildflags") {
flags = [ "CRONET_BUILD=$is_cronet_build" ]
}
enable_message_pump_epoll = is_linux || is_chromeos || is_android
buildflag_header("message_pump_buildflags") {
header = "message_pump_buildflags.h"
header_dir = "base/message_loop"
flags = [ "ENABLE_MESSAGE_PUMP_EPOLL=$enable_message_pump_epoll" ]
}
# Base and everything it depends on should be a static library rather than
# a source set. Base is more of a "library" in the classic sense in that many
# small parts of it are used in many different contexts. This combined with a
@@ -187,10 +180,6 @@ mixed_component("base") {
"allocator/allocator_check.h",
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
"allocator/dispatcher/dispatcher.cc",
"allocator/dispatcher/dispatcher.h",
"allocator/dispatcher/reentry_guard.cc",
"allocator/dispatcher/reentry_guard.h",
"as_const.h",
"at_exit.cc",
"at_exit.h",
@@ -228,8 +217,6 @@ mixed_component("base") {
"cancelable_callback.h",
"check.cc",
"check.h",
"check_is_test.cc",
"check_is_test.h",
"check_op.cc",
"check_op.h",
"command_line.cc",
@@ -413,7 +400,6 @@ mixed_component("base") {
"memory/raw_ptr_asan_service.cc",
"memory/raw_ptr_asan_service.h",
"memory/raw_ptr_exclusion.h",
"memory/raw_ref.h",
"memory/raw_scoped_refptr_mismatch_checker.h",
"memory/read_only_shared_memory_region.cc",
"memory/read_only_shared_memory_region.h",
@@ -512,6 +498,7 @@ mixed_component("base") {
"native_library.cc",
"native_library.h",
"no_destructor.h",
"notreached.cc",
"notreached.h",
"observer_list.h",
"observer_list_internal.cc",
@@ -565,6 +552,7 @@ mixed_component("base") {
"profiler/metadata_recorder.h",
"profiler/module_cache.cc",
"profiler/module_cache.h",
"profiler/native_unwinder.h",
"profiler/profile_builder.h",
"profiler/register_context.h",
"profiler/sample_metadata.cc",
@@ -926,7 +914,6 @@ mixed_component("base") {
"trace_event/trace_id_helper.h",
"traits_bag.h",
"tuple.h",
"types/always_false.h",
"types/expected.h",
"types/expected_internal.h",
"types/id_type.h",
@@ -1279,9 +1266,9 @@ mixed_component("base") {
"process/process_iterator_mac.cc",
"process/process_mac.cc",
"process/process_metrics_mac.cc",
"profiler/frame_pointer_unwinder.cc",
"profiler/frame_pointer_unwinder.h",
"profiler/module_cache_mac.cc",
"profiler/native_unwinder_apple.cc",
"profiler/native_unwinder_apple.h",
"profiler/stack_sampler_mac.cc",
"profiler/suspendable_thread_delegate_mac.cc",
"profiler/suspendable_thread_delegate_mac.h",
@@ -1327,8 +1314,8 @@ mixed_component("base") {
if (ios_stack_profiler_enabled) {
sources += [
"profiler/frame_pointer_unwinder.cc",
"profiler/frame_pointer_unwinder.h",
"profiler/native_unwinder_apple.cc",
"profiler/native_unwinder_apple.h",
"profiler/suspendable_thread_delegate_mac.cc",
"profiler/suspendable_thread_delegate_mac.h",
]
@@ -1428,13 +1415,15 @@ mixed_component("base") {
configs += [
"//build/config:precompiled_headers",
"//build/config/compiler:prevent_unsafe_narrowing",
# TODO(crbug.com/1292951): Enable.
# "//build/config/compiler:prevent_unsafe_narrowing",
"//build/config/compiler:wexit_time_destructors",
"//build/config/compiler:wglobal_constructors",
]
deps = [
":message_pump_buildflags",
"//base/allocator:buildflags",
"//base/third_party/double_conversion",
"//base/third_party/dynamic_annotations",
@@ -1687,14 +1676,8 @@ mixed_component("base") {
"logging_chromeos.cc",
"system/sys_info_chromeos.cc",
]
if (is_chromeos_ash) {
sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ]
if (current_cpu == "x64") {
sources += [
"profiler/frame_pointer_unwinder.cc",
"profiler/frame_pointer_unwinder.h",
]
}
}
@@ -1809,7 +1792,6 @@ mixed_component("base") {
deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys",
"//third_party/fuchsia-sdk/sdk/pkg/async-default",
@@ -2196,7 +2178,7 @@ mixed_component("base") {
}
if (dep_libevent) {
deps += [ "//third_party/libevent" ]
deps += [ "//base/third_party/libevent" ]
}
if (use_libevent) {
@@ -2206,13 +2188,6 @@ mixed_component("base") {
]
}
if (enable_message_pump_epoll) {
sources += [
"message_loop/message_pump_epoll.cc",
"message_loop/message_pump_epoll.h",
]
}
# Android and MacOS have their own custom shared memory handle
# implementations. e.g. due to supporting both POSIX and native handles.
if (is_posix && !is_android && !is_apple) {
@@ -2254,7 +2229,8 @@ mixed_component("base") {
]
}
if ((is_posix && !is_apple && !is_android && !is_chromeos) || is_fuchsia) {
if ((is_posix && !is_apple && !is_android && !is_chromeos_ash) ||
is_fuchsia) {
sources += [ "power_monitor/power_monitor_device_source_stub.cc" ]
}
@@ -2311,6 +2287,8 @@ mixed_component("base") {
"trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h",
"trace_event/task_execution_macros.h",
"trace_event/thread_instruction_count.cc",
"trace_event/thread_instruction_count.h",
"trace_event/trace_arguments.cc",
"trace_event/trace_arguments.h",
"trace_event/trace_buffer.cc",

View File

@@ -3,7 +3,6 @@ include_rules = [
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",

View File

@@ -2,7 +2,7 @@ lizeb@chromium.org
primiano@chromium.org
wfh@chromium.org
per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
per-file allocator_shim_default_dispatch_to_partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS
per-file allocator.gni=bartekn@chromium.org
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org
per-file partition_alloc*=bartekn@chromium.org
per-file BUILD.gn=bartekn@chromium.org

View File

@@ -81,8 +81,7 @@ if (is_win && use_allocator_shim) {
"The allocator shim doesn't work for the component build on Windows.")
}
_is_brp_supported = (is_win || is_android || is_linux || is_mac ||
is_chromeos) && use_allocator == "partition"
_is_brp_supported = (is_win || is_android) && use_allocator == "partition"
declare_args() {
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
@@ -113,10 +112,7 @@ declare_args() {
# Finch.
use_fake_binary_experiment = false
# The supported platforms are supposed to match `_is_brp_supported`, but we
# enable the feature on Linux early because it's most widely used for security
# research
use_asan_backup_ref_ptr = is_asan && (is_win || is_android || is_linux)
use_asan_backup_ref_ptr = false
}
# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.

View File

@@ -44,7 +44,8 @@
#include "base/mac/mac_util.h"
#endif
namespace base::allocator {
namespace base {
namespace allocator {
bool g_replaced_default_zone = false;
@@ -236,9 +237,9 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
return !base::ios::IsRunningOnOrLater(17, 0, 0);
return !base::ios::IsRunningOnOrLater(16, 0, 0);
#else
return !base::mac::IsOSLaterThan13_DontCallThis();
return !base::mac::IsOSLaterThan12_DontCallThis();
#endif
}
@@ -257,7 +258,7 @@ void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
void* info) {
void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
@@ -266,7 +267,7 @@ void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
void* info) {
void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
@@ -275,7 +276,7 @@ void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
void* info) {
void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
if (!result)
TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
TerminateBecauseOutOfMemory(alloc_size);
return result;
}
@@ -608,4 +609,5 @@ void ReplaceZoneFunctions(ChromeMallocZone* zone,
}
}
} // namespace base::allocator
} // namespace allocator
} // namespace base

View File

@@ -174,7 +174,6 @@ BASE_EXPORT void InitializeAllocatorShim();
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition =
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
@@ -186,13 +185,12 @@ using AlternateBucketDistribution =
// thread-cache on the main (malloc) partition will be disabled.
BASE_EXPORT void ConfigurePartitions(
EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution);
#if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
BASE_EXPORT void EnablePCScan(base::internal::PCScan::InitConfig);
#endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

View File

@@ -27,7 +27,6 @@
#include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
@@ -147,27 +146,26 @@ T* LeakySingleton<T, Constructor>::GetSlowPath() {
class MainPartitionConstructor {
public:
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
constexpr partition_alloc::PartitionOptions::ThreadCache thread_cache =
constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Additional partitions may be created in ConfigurePartitions(). Since
// only one partition can have thread cache enabled, postpone the
// decision to turn the thread cache on until after that call.
// TODO(bartekn): Enable it here by default, once the "split-only" mode
// is no longer needed.
partition_alloc::PartitionOptions::ThreadCache::kDisabled;
base::PartitionOptions::ThreadCache::kDisabled;
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Other tests, such as the ThreadCache tests create a thread cache,
// and only one is supported at a time.
partition_alloc::PartitionOptions::ThreadCache::kDisabled;
base::PartitionOptions::ThreadCache::kDisabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
base::PartitionOptions::AlignedAlloc::kAllowed,
thread_cache,
partition_alloc::PartitionOptions::Quarantine::kAllowed,
partition_alloc::PartitionOptions::Cookie::kAllowed,
partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
return new_root;
@@ -277,9 +275,9 @@ namespace internal {
namespace {
#if BUILDFLAG(IS_APPLE)
unsigned int g_alloc_flags = 0;
int g_alloc_flags = 0;
#else
constexpr unsigned int g_alloc_flags = 0;
constexpr int g_alloc_flags = 0;
#endif
} // namespace
@@ -302,7 +300,7 @@ void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks(
g_alloc_flags, MaybeAdjustSize(size),
0 | g_alloc_flags, MaybeAdjustSize(size),
partition_alloc::PartitionPageSize());
}
@@ -515,19 +513,17 @@ void PartitionBatchFree(const AllocatorDispatch*,
}
// static
partition_alloc::ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
return ::Allocator();
}
// static
partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::OriginalAllocator() {
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
return ::OriginalAllocator();
}
// static
partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::AlignedAllocator() {
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
return ::AlignedAllocator();
}
@@ -567,7 +563,6 @@ alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
void ConfigurePartitions(
EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution) {
@@ -601,22 +596,18 @@ void ConfigurePartitions(
PA_DCHECK(!current_root->flags.with_thread_cache);
return;
}
auto* new_root = new (g_allocator_buffer_for_new_main_partition)
partition_alloc::ThreadSafePartitionRoot({
auto* new_root =
new (g_allocator_buffer_for_new_main_partition) ThreadSafePartitionRoot({
!use_dedicated_aligned_partition
? partition_alloc::PartitionOptions::AlignedAlloc::kAllowed
: partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
partition_alloc::PartitionOptions::ThreadCache::kDisabled,
partition_alloc::PartitionOptions::Quarantine::kAllowed,
partition_alloc::PartitionOptions::Cookie::kAllowed,
enable_brp
? partition_alloc::PartitionOptions::BackupRefPtr::kEnabled
: partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
enable_brp_zapping
? partition_alloc::PartitionOptions::BackupRefPtrZapping::kEnabled
: partition_alloc::PartitionOptions::BackupRefPtrZapping::
kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
? base::PartitionOptions::AlignedAlloc::kAllowed
: base::PartitionOptions::AlignedAlloc::kDisallowed,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
enable_brp ? base::PartitionOptions::BackupRefPtr::kEnabled
: base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
@ -624,14 +615,13 @@ void ConfigurePartitions(
// TODO(bartekn): Use the original root instead of creating a new one. It'd
// result in one less partition, but come at a cost of commingling types.
new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
partition_alloc::ThreadSafePartitionRoot({
partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
partition_alloc::PartitionOptions::ThreadCache::kDisabled,
partition_alloc::PartitionOptions::Quarantine::kAllowed,
partition_alloc::PartitionOptions::Cookie::kAllowed,
partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo,
});
} else {
// The new main root can also support AlignedAlloc.
@ -653,9 +643,8 @@ void ConfigurePartitions(
PA_CHECK(current_aligned_root == g_original_root);
// Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory(
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);
current_root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
PurgeFlags::kDiscardUnusedSystemPages);
if (!use_alternate_bucket_distribution) {
g_root.Get()->SwitchToDenserBucketDistribution();
@ -664,18 +653,16 @@ void ConfigurePartitions(
}
#if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
void EnablePCScan(base::internal::PCScan::InitConfig config) {
partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&::base::PlatformThread::SetName);
partition_alloc::internal::PCScan::Initialize(config);
internal::PCScan::Initialize(config);
partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
internal::PCScan::RegisterScannableRoot(Allocator());
if (OriginalAllocator() != nullptr)
partition_alloc::internal::PCScan::RegisterScannableRoot(
OriginalAllocator());
internal::PCScan::RegisterScannableRoot(OriginalAllocator());
if (Allocator() != AlignedAllocator())
partition_alloc::internal::PCScan::RegisterScannableRoot(
AlignedAllocator());
internal::PCScan::RegisterScannableRoot(AlignedAllocator());
internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
@ -737,11 +724,11 @@ SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
partition_alloc::SimplePartitionStatsDumper allocator_dumper;
base::SimplePartitionStatsDumper allocator_dumper;
Allocator()->DumpStats("malloc", true, &allocator_dumper);
// TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
base::SimplePartitionStatsDumper aligned_allocator_dumper;
if (AlignedAllocator() != Allocator()) {
AlignedAllocator()->DumpStats("posix_memalign", true,
&aligned_allocator_dumper);
@ -750,13 +737,13 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
// Dump stats for nonscannable and nonquarantinable allocators.
auto& nonscannable_allocator =
base::internal::NonScannableAllocator::Instance();
partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
base::SimplePartitionStatsDumper nonscannable_allocator_dumper;
if (auto* nonscannable_root = nonscannable_allocator.root())
nonscannable_root->DumpStats("malloc", true,
&nonscannable_allocator_dumper);
auto& nonquarantinable_allocator =
base::internal::NonQuarantinableAllocator::Instance();
partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
base::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
nonquarantinable_root->DumpStats("malloc", true,
&nonquarantinable_allocator_dumper);
@ -765,23 +752,20 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
info.arena = 0; // Memory *not* allocated with mmap().
// Memory allocated with mmap(), aka virtual size.
info.hblks = base::checked_cast<decltype(info.hblks)>(
allocator_dumper.stats().total_mmapped_bytes +
info.hblks = allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes;
// Resident bytes.
info.hblkhd = base::checked_cast<decltype(info.hblkhd)>(
allocator_dumper.stats().total_resident_bytes +
info.hblkhd = allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes);
nonquarantinable_allocator_dumper.stats().total_resident_bytes;
// Allocated bytes.
info.uordblks = base::checked_cast<decltype(info.uordblks)>(
allocator_dumper.stats().total_active_bytes +
info.uordblks = allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes);
nonquarantinable_allocator_dumper.stats().total_active_bytes;
return info;
}

View File

@ -16,11 +16,11 @@ void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);
class BASE_EXPORT PartitionAllocMalloc {
public:
static partition_alloc::ThreadSafePartitionRoot* Allocator();
static ThreadSafePartitionRoot* Allocator();
// May return |nullptr|, will never return the same pointer as |Allocator()|.
static partition_alloc::ThreadSafePartitionRoot* OriginalAllocator();
static ThreadSafePartitionRoot* OriginalAllocator();
// May return the same pointer as |Allocator()|.
static partition_alloc::ThreadSafePartitionRoot* AlignedAllocator();
static ThreadSafePartitionRoot* AlignedAllocator();
};
BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,

View File

@ -17,6 +17,43 @@
#include "base/compiler_specific.h"
#include "build/build_config.h"
// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway, to prevent a possible situation where a library that uses
// the aligned operators gets loaded in; we want to avoid ending up with
// separate heaps.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if BUILDFLAG(IS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif
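For context, here is a minimal sketch of what ends up routed through these overrides, assuming a C++17 toolchain (so the plain operator new(std::size_t, std::align_val_t) spellings above apply). Any type whose alignment exceeds the default new alignment makes the compiler call the aligned operator pair, which the shim forwards to ShimCppAlignedNew() rather than letting a second heap serve it. CacheLinePadded and AlignedNewExample are made-up names for illustration.

#include <cstddef>
#include <new>

// Hypothetical over-aligned type; 64 exceeds the usual default new alignment.
struct alignas(64) CacheLinePadded {
  char data[64];
};

void AlignedNewExample() {
  // Under C++17 the compiler emits operator new(std::size_t, std::align_val_t)
  // for this allocation, so it is intercepted by the overrides above.
  CacheLinePadded* padded = new CacheLinePadded();
  delete padded;  // Likewise routed to operator delete(void*, std::align_val_t).
}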
#if !BUILDFLAG(IS_APPLE)
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
#else
@ -73,58 +110,57 @@ SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
std::align_val_t alignment) {
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW(std::size_t size,
ALIGN_VAL_T alignment) {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
std::align_val_t alignment,
const std::nothrow_t&) __THROW {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
std::align_val_t) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_NOTHROW(
std::size_t size,
std::align_val_t) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
std::align_val_t,
const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
std::align_val_t alignment) {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
std::align_val_t alignment,
ALIGN_VAL_T alignment,
const std::nothrow_t&) __THROW {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
std::align_val_t) __THROW {
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL(void* p,
ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR(
std::size_t size,
std::align_val_t) __THROW {
ALIGN_VAL_T alignment) {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
std::size_t size,
ALIGN_VAL_T alignment,
const std::nothrow_t&) __THROW {
return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL_ARR(void* p,
ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
std::align_val_t,
const std::nothrow_t&) __THROW {
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
ShimCppDelete(p);
}

View File

@ -122,10 +122,7 @@ SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
malloc(kInitialSize)); // Our malloc() doesn't return nullptr.
int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
if (actual_size < 0)
return actual_size;
*strp =
static_cast<char*>(realloc(*strp, static_cast<size_t>(actual_size + 1)));
*strp = static_cast<char*>(realloc(*strp, actual_size + 1));
// Now we know the size. This is not very efficient, but we cannot really do
// better without accessing internal libc functions, or reimplementing
@ -134,7 +131,7 @@ SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
// This is very lightly used in Chromium in practice, see crbug.com/116558 for
// details.
if (actual_size >= kInitialSize)
return vsnprintf(*strp, static_cast<size_t>(actual_size + 1), fmt, va_args);
return vsnprintf(*strp, actual_size + 1, fmt, va_args);
return actual_size;
}

View File

@ -1,239 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {
using allocator::AllocatorDispatch;
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
ReentryGuard guard;
void* address = self->next->alloc_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_unchecked_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_zero_initialized_function(self->next, n, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, n * size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_aligned_function(self->next, alignment, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->realloc_function(self->next, address, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
// Note: The RecordFree should be called before free_function
// (here and in other places).
// That is because we need to remove the recorded allocation sample before
// free_function, as once the latter is executed the address becomes available
// and can be allocated by another thread. That would be racy otherwise.
PoissonAllocationSampler::RecordFree(address);
self->next->free_function(self->next, address, context);
}
size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
ReentryGuard guard;
unsigned num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
if (LIKELY(guard)) {
for (unsigned i = 0; i < num_allocated; ++i) {
PoissonAllocationSampler::RecordAlloc(
results[i], size, PoissonAllocationSampler::kMalloc, nullptr);
}
}
return num_allocated;
}
void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i)
PoissonAllocationSampler::RecordFree(to_be_freed[i]);
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->free_definite_size_function(self->next, address, size, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
void* address =
self->next->aligned_malloc_function(self->next, size, alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
} // namespace
} // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher::partition_allocator_details {
namespace {
void PartitionAllocHook(void* address, size_t size, const char* type) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kPartitionAlloc, type);
}
void PartitionFreeHook(void* address) {
PoissonAllocationSampler::RecordFree(address);
}
} // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher {
void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator::InsertAllocatorDispatch(
&allocator_shim_details::g_allocator_dispatch);
#else
// If the allocator shim isn't available, then we don't install any hooks.
// There's no point in printing an error message, since this can regularly
// happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
}
void RemoveStandardAllocatorHooksForTesting() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator::RemoveAllocatorDispatchForTesting(
&allocator_shim_details::g_allocator_dispatch); // IN-TEST
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}
} // namespace base::allocator::dispatcher

View File

@ -1,17 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/base_export.h"
namespace base::allocator::dispatcher {
void BASE_EXPORT InstallStandardAllocatorHooks();
void BASE_EXPORT RemoveStandardAllocatorHooksForTesting();
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

View File

@ -1,20 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
namespace base::allocator::dispatcher {
pthread_key_t ReentryGuard::entered_key_ = 0;
} // namespace base::allocator::dispatcher
#endif

View File

@ -1,68 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// The macOS implementation of libmalloc sometimes calls malloc recursively,
// delegating allocations between zones. That causes our hooks to be called
// twice. The scoped guard allows us to detect that.
//
// Besides that, the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local variables.
// Make use of pthread TLS instead of C++ thread_local there.
struct ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
}
~ReentryGuard() {
if (LIKELY(allowed_))
pthread_setspecific(entered_key_, nullptr);
}
explicit operator bool() const noexcept { return allowed_; }
// This function must be called very early in process start-up in
// order to acquire a low TLS slot number, because the glibc TLS implementation
// will require a malloc call to allocate storage for a higher slot number
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot.
static void Init() {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
}
private:
static pthread_key_t entered_key_;
const bool allowed_;
};
#else
// Use [[maybe_unused]] because this lightweight stand-in for the more
// heavyweight ReentryGuard above would otherwise trigger "unused code" warnings.
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; }
static void Init() {}
};
#endif
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_

View File

@ -11,19 +11,11 @@
namespace base {
namespace features {
const BASE_EXPORT Feature kPartitionAllocDanglingPtr{
"PartitionAllocDanglingPtr", FEATURE_DISABLED_BY_DEFAULT};
constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
{DanglingPtrMode::kCrash, "crash"},
{DanglingPtrMode::kLogSignature, "log_signature"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
&kDanglingPtrModeOption,
};
// When set, instead of crashing when encountering a dangling raw_ptr, the
// signatures of the two stacktraces are logged. This is meant to be used only
// by Chromium developers. See /docs/dangling_ptr.md
const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord{
"PartitionAllocDanglingPtrRecord", FEATURE_DISABLED_BY_DEFAULT};
#if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly.
@ -63,8 +55,7 @@ const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
const Feature kPartitionAllocBackupRefPtr {
"PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN)
FEATURE_ENABLED_BY_DEFAULT
#else
FEATURE_DISABLED_BY_DEFAULT
@ -88,7 +79,6 @@ const base::FeatureParam<BackupRefPtrEnabledProcesses>
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way,

View File

@ -17,28 +17,7 @@ struct Feature;
namespace features {
// See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtr;
enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr.
kCrash, // (default)
// Log the signature of every occurrence without crashing. It is used by
// bots.
// Format "[DanglingSignature]\t<1>\t<2>"
// 1. The function who freed the memory while it was still referenced.
// 2. The function who released the raw_ptr reference.
kLogSignature,
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord;
#if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif // defined(PA_ALLOW_PCSCAN)
@ -71,9 +50,6 @@ enum class BackupRefPtrMode {
// This entails splitting the main partition.
kEnabled,
// Same as kEnabled but without zapping quarantined objects.
kEnabledWithoutZapping,
// BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode.
kDisabledButSplitPartitions2Way,

View File

@ -50,33 +50,33 @@ namespace {
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString(
partition_alloc::internal::StatsCollector::ScannerId id) {
internal::StatsCollector::ScannerId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::ScannerId::kClear:
case internal::StatsCollector::ScannerId::kClear:
return "PCScan.Scanner.Clear";
case partition_alloc::internal::StatsCollector::ScannerId::kScan:
case internal::StatsCollector::ScannerId::kScan:
return "PCScan.Scanner.Scan";
case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
case internal::StatsCollector::ScannerId::kSweep:
return "PCScan.Scanner.Sweep";
case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
case internal::StatsCollector::ScannerId::kOverall:
return "PCScan.Scanner";
case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
case internal::StatsCollector::ScannerId::kNumIds:
__builtin_unreachable();
}
}
constexpr const char* MutatorIdToTracingString(
partition_alloc::internal::StatsCollector::MutatorId id) {
internal::StatsCollector::MutatorId id) {
switch (id) {
case partition_alloc::internal::StatsCollector::MutatorId::kClear:
case internal::StatsCollector::MutatorId::kClear:
return "PCScan.Mutator.Clear";
case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
case internal::StatsCollector::MutatorId::kScanStack:
return "PCScan.Mutator.ScanStack";
case partition_alloc::internal::StatsCollector::MutatorId::kScan:
case internal::StatsCollector::MutatorId::kScan:
return "PCScan.Mutator.Scan";
case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
case internal::StatsCollector::MutatorId::kOverall:
return "PCScan.Mutator";
case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
case internal::StatsCollector::MutatorId::kNumIds:
__builtin_unreachable();
}
}
@ -85,9 +85,8 @@ constexpr const char* MutatorIdToTracingString(
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter {
public:
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::ScannerId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
void ReportTraceEvent(internal::StatsCollector::ScannerId id,
[[maybe_unused]] uint32_t tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
@ -105,9 +104,8 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
void ReportTraceEvent(
partition_alloc::internal::StatsCollector::MutatorId id,
[[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
void ReportTraceEvent(internal::StatsCollector::MutatorId id,
[[maybe_unused]] uint32_t tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
@ -158,7 +156,7 @@ void RegisterPCScanStatsReporter() {
DCHECK(!registered);
partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
internal::PCScan::RegisterStatsReporter(&s_reporter);
registered = true;
}
#endif // defined(PA_ALLOW_PCSCAN)
@ -291,13 +289,6 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
brp_group_name = "EnabledPrevSlot";
#else
brp_group_name = "EnabledBeforeAlloc";
#endif
break;
case features::BackupRefPtrMode::kEnabledWithoutZapping:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlotWithoutZapping";
#else
brp_group_name = "EnabledBeforeAllocWithoutZapping";
#endif
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
@ -422,6 +413,7 @@ absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
// This function is meant to be used only by Chromium developers, to list all
// the dangling raw_ptr occurrences in a table.
std::string ExtractDanglingPtrSignature(std::string stacktrace) {
LOG(ERROR) << stacktrace;
std::vector<StringPiece> lines = SplitStringPiece(
stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
@ -461,7 +453,7 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
return std::string(caller.substr(function_start + 1));
}
void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
void DanglingRawPtrReleased(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
@ -469,6 +461,7 @@ void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
debug::StackTrace stack_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
if (FeatureList::IsEnabled(features::kPartitionAllocDanglingPtrRecord)) {
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
"[DanglingSignature]\t%s\t%s",
@ -479,14 +472,8 @@ void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
"[DanglingSignature]\t%s\tmissing-stacktrace",
ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
}
}
void DanglingRawPtrReleasedCrash(uintptr_t id) {
// This is called from raw_ptr<>'s release operation. Making allocations is
// allowed. In particular, symbolizing and printing the StackTraces may
// allocate memory.
debug::StackTrace stack_trace_release;
absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);
return;
}
if (stack_trace_free) {
LOG(ERROR) << StringPrintf(
@ -519,24 +506,8 @@ void InstallDanglingRawPtrChecks() {
// restarting the test executable.
ClearDanglingRawPtrBuffer();
if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
return;
}
switch (features::kDanglingPtrModeParam.Get()) {
case features::DanglingPtrMode::kCrash:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
break;
case features::DanglingPtrMode::kLogSignature:
partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
partition_alloc::SetDanglingRawPtrReleasedFn(
DanglingRawPtrReleasedLogSignature);
break;
}
partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleased);
}
// TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there

View File

@ -277,6 +277,8 @@ target(partition_alloc_target_type, "partition_alloc") {
":debugging_buildflags",
":logging_buildflags",
":partition_alloc_buildflags",
"//base:synchronization_buildflags",
"//base:tracing_buildflags",
"//build:branding_buildflags",
"//build/config/compiler:compiler_buildflags",
]
@ -322,6 +324,73 @@ target(partition_alloc_target_type, "partition_alloc") {
}
}
source_set("test_support") {
sources = [
"extended_api.cc",
"extended_api.h",
"partition_alloc_base/threading/platform_thread_for_testing.h",
]
if (is_posix) {
sources += [
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_fuchsia) {
sources += [
"partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
"partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
]
}
if (is_win) {
sources +=
[ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
}
if (is_mac || is_ios) {
sources +=
[ "partition_alloc_base/threading/platform_thread_mac_for_testing.mm" ]
}
if (is_linux || is_chromeos) {
sources += [
"partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
]
}
if (is_android) {
sources += [
"partition_alloc_base/threading/platform_thread_android_for_testing.cc",
]
}
public_deps = [
":chromecast_buildflags",
":chromeos_buildflags",
":debugging_buildflags",
":logging_buildflags",
":partition_alloc_buildflags",
"//base:synchronization_buildflags",
"//base:tracing_buildflags",
"//build:branding_buildflags",
"//build/config/compiler:compiler_buildflags",
]
public_configs = []
if (is_fuchsia) {
public_deps += [
"//third_party/fuchsia-sdk/sdk/pkg/fit",
"//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
# Needed for users of spinning_mutex.h, which, for performance reasons,
# contains inlined calls to `libsync` inside the header file.
# It appends an entry to the "libs" section of the dependent target.
public_configs += [ ":fuchsia_sync_lib" ]
}
deps = [ "//base:base" ]
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_speed" ]
}
}
buildflag_header("partition_alloc_buildflags") {
header = "partition_alloc_buildflags.h"

View File

@ -1,4 +1,3 @@
bartekn@chromium.org
haraken@chromium.org
lizeb@chromium.org
tasak@google.com

View File

@ -21,8 +21,6 @@ paths of allocation and deallocation require very few (reasonably predictable)
branches. The number of operations in the fast paths is minimal, leading to the
possibility of inlining.
![general architecture](./dot/layers.png)
However, even the fast path isn't the fastest, because it requires taking
a per-partition lock. Although we optimized the lock, there was still room for
improvement; to this end, we introduced the thread cache.
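To make that split concrete, here is a deliberately simplified sketch; these are not the real PartitionAlloc classes, and CentralAllocator and ThreadCache below are illustrative stand-ins. The per-thread front end serves allocations from a local free list without any locking, and only falls back to the lock-protected central allocator when the list is empty.

#include <cstddef>
#include <mutex>
#include <new>
#include <vector>

// Stand-in for the central allocator: every operation takes the partition lock.
class CentralAllocator {
 public:
  void* AllocSlot(std::size_t size) {
    std::lock_guard<std::mutex> guard(lock_);
    return ::operator new(size);
  }
  void FreeSlot(void* slot) {
    std::lock_guard<std::mutex> guard(lock_);
    ::operator delete(slot);
  }

 private:
  std::mutex lock_;
};

// Per-thread cache for a single bucket size. Hits avoid the central lock.
class ThreadCache {
 public:
  explicit ThreadCache(std::size_t slot_size) : slot_size_(slot_size) {}

  void* Alloc(CentralAllocator& central) {
    if (!free_slots_.empty()) {  // Fast path: thread-local, no lock taken.
      void* slot = free_slots_.back();
      free_slots_.pop_back();
      return slot;
    }
    return central.AllocSlot(slot_size_);  // Slow path: per-partition lock.
  }

  // Freed slots are kept locally so the next Alloc() on this thread stays lock-free.
  void Free(void* slot) { free_slots_.push_back(slot); }

 private:
  std::size_t slot_size_;
  std::vector<void*> free_slots_;
};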
@ -104,13 +102,6 @@ partition page that holds metadata (32B struct per partition page).
diagram).
* Gray fill denotes guard pages (one partition page each at the head and tail
of each super page).
* In some configurations, PartitionAlloc stores more metadata than can
fit in the one system page at the front. These are the bitmaps for
StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
what would otherwise be usable space for slot spans. One, both, or
none of these bitmaps may be present, depending on build
configuration, runtime configuration, and type of allocation.
See [`SuperPagePayloadBegin()`][payload-start] for details.
As allocation requests arrive, there is eventually a need to allocate a new slot
span.
@ -187,4 +178,3 @@ list.
[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454

View File

@ -21,6 +21,13 @@
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"
namespace base {
template <typename Type>
struct LazyInstanceTraitsBase;
} // namespace base
namespace partition_alloc {
class AddressSpaceStatsDumper;
@ -159,6 +166,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
#endif // defined(PA_HAS_64_BITS_POINTERS)
static AddressPoolManager singleton_;
friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
};
PA_ALWAYS_INLINE pool_handle GetRegularPool() {
@ -176,4 +185,13 @@ PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
} // namespace partition_alloc::internal
namespace base::internal {
using ::partition_alloc::internal::AddressPoolManager;
using ::partition_alloc::internal::GetBRPPool;
using ::partition_alloc::internal::GetConfigurablePool;
using ::partition_alloc::internal::GetRegularPool;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

View File

@ -11,4 +11,12 @@ using pool_handle = unsigned;
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::pool_handle;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

View File

@ -1,23 +0,0 @@
digraph G {
graph[bgcolor=transparent]
node[shape=box,style="filled,rounded",color=deepskyblue]
subgraph cluster_tc {
label = "Thread Cache"
rankdir = LR
{rank=same;TLS1,TLS2,TLSn}
TLS1->TLS2[style=invisible,dir=none]
TLS2->TLSn[style=dotted,dir=none]
}
subgraph cluster_central {
label = "Central Allocator (per-partition lock)"
fast[label="slot span freelists (fast path)"]
slow[label="slot span management (slow path)"]
# Forces slow path node beneath fast path node.
fast->slow[style=invisible,dir=none]
}
# Forces thread-external subgraph beneath thread cache subgraph.
TLS2->fast[style=invisible,dir=none]
}

Binary file not shown (image removed; was 11 KiB)

View File

@ -21,8 +21,6 @@ digraph G {
<TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
<TD PORT="metadata"></TD>
<TD BGCOLOR="darkgrey" WIDTH="18"></TD>
<!-- Bitmaps -->
<TD WIDTH="100">Bitmaps(?)</TD>
<!-- Several Slot Spans -->
<TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
<TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
@ -52,8 +50,6 @@ digraph G {
<TR>
<!-- Guard Page Metadata -->
<TD BGCOLOR="darkgrey"> </TD>
<!-- Bitmaps Offset -->
<TD> B? </TD>
<!-- Red Slot Span Metadata -->
<TD BGCOLOR="crimson">v</TD>
<TD BGCOLOR="crimson">+</TD>

Binary file not shown (image modified; before: 20 KiB, after: 18 KiB)

View File

@ -71,4 +71,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::MemoryReclaimer;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

View File

@ -329,4 +329,35 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::AllocPages;
using ::partition_alloc::AllocPagesWithAlignOffset;
using ::partition_alloc::DecommitAndZeroSystemPages;
using ::partition_alloc::DecommitSystemPages;
using ::partition_alloc::DecommittedMemoryIsAlwaysZeroed;
using ::partition_alloc::DiscardSystemPages;
using ::partition_alloc::FreePages;
using ::partition_alloc::GetAllocPageErrorCode;
using ::partition_alloc::GetTotalMappedSize;
using ::partition_alloc::HasReservationForTesting;
using ::partition_alloc::NextAlignedWithOffset;
using ::partition_alloc::PageAccessibilityConfiguration;
using ::partition_alloc::PageAccessibilityDisposition;
using ::partition_alloc::PageTag;
using ::partition_alloc::RecommitSystemPages;
using ::partition_alloc::ReleaseReservation;
using ::partition_alloc::ReserveAddressSpace;
using ::partition_alloc::RoundDownToPageAllocationGranularity;
using ::partition_alloc::RoundDownToSystemPage;
using ::partition_alloc::RoundUpToPageAllocationGranularity;
using ::partition_alloc::RoundUpToSystemPage;
using ::partition_alloc::SetSystemPagesAccess;
using ::partition_alloc::TryRecommitSystemPages;
using ::partition_alloc::TrySetSystemPagesAccess;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

View File

@ -42,8 +42,8 @@ namespace partition_alloc::internal {
// Use PageAllocationGranularity(), PageAllocationGranularityShift()
// to initialize and retrieve these values safely.
struct PageCharacteristics {
std::atomic<size_t> size;
std::atomic<size_t> shift;
std::atomic<int> size;
std::atomic<int> shift;
};
extern PageCharacteristics page_characteristics;
@ -78,14 +78,13 @@ PageAllocationGranularityShift() {
#elif defined(_MIPS_ARCH_LOONGSON)
return 14; // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
return static_cast<size_t>(vm_page_shift);
return vm_page_shift;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
// page sizes. Retrieve from or initialize cache.
size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
int shift = page_characteristics.shift.load(std::memory_order_relaxed);
if (PA_UNLIKELY(shift == 0)) {
shift = static_cast<size_t>(
__builtin_ctz((unsigned int)PageAllocationGranularity()));
shift = __builtin_ctz((int)PageAllocationGranularity());
page_characteristics.shift.store(shift, std::memory_order_relaxed);
}
return shift;
@ -103,9 +102,9 @@ PageAllocationGranularity() {
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
// initialize cache.
size_t size = page_characteristics.size.load(std::memory_order_relaxed);
int size = page_characteristics.size.load(std::memory_order_relaxed);
if (PA_UNLIKELY(size == 0)) {
size = static_cast<size_t>(getpagesize());
size = getpagesize();
page_characteristics.size.store(size, std::memory_order_relaxed);
}
return size;

View File

@ -17,6 +17,7 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_IOS)
@ -136,7 +137,7 @@ void PartitionAddressSpace::Init() {
if (!setup_.regular_pool_base_address_)
HandleGigaCageAllocFailure();
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1) & kMemTagUnmask;
#endif
PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
setup_.regular_pool_ = AddressPoolManager::GetInstance().Add(
@ -163,7 +164,7 @@ void PartitionAddressSpace::Init() {
HandleGigaCageAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1) & kMemTagUnmask;
#endif
PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
setup_.brp_pool_ = AddressPoolManager::GetInstance().Add(

View File

@ -61,6 +61,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) {
address = ::partition_alloc::internal::UnmaskPtr(address);
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
PA_DCHECK(!IsInBRPPool(address));
@ -154,7 +155,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_;
return ::partition_alloc::internal::UnmaskPtr(address) -
setup_.brp_pool_base_address_;
}
// PartitionAddressSpace is static_only class.
@ -225,12 +227,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
// Masks used to easily determine whether an address belongs to a pool.
// On Arm, the top byte of each pointer is ignored (meaning there are
// effectively 256 versions of each valid pointer). 4 bits are used to store
// tags for Arm's Memory Tagging Extension (MTE). To ensure that tagged
// pointers are recognized as being in the pool, mask off the top byte with
// kMemTagUnmask.
static constexpr uintptr_t kRegularPoolOffsetMask =
static_cast<uintptr_t>(kRegularPoolSize) - 1;
static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
static constexpr uintptr_t kRegularPoolBaseMask =
~kRegularPoolOffsetMask & kMemTagUnmask;
static constexpr uintptr_t kBRPPoolOffsetMask =
static_cast<uintptr_t>(kBRPPoolSize) - 1;
static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
static constexpr uintptr_t kBRPPoolBaseMask =
~kBRPPoolOffsetMask & kMemTagUnmask;
#endif // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
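To see why the base masks fold in kMemTagUnmask, consider a toy example; the constants below are illustrative stand-ins, not the real pool geometry or the real kMemTagUnmask value. An address whose top byte carries an MTE tag still resolves to its pool base once both the offset bits and the tag byte are masked off.

#include <cstdint>

// Illustrative stand-ins only (uint64_t used so the example is portable).
constexpr uint64_t kMemTagUnmaskExample = 0x00ff'ffff'ffff'ffffULL;  // clears the top byte
constexpr uint64_t kPoolSizeExample = uint64_t{1} << 30;             // 1 GiB pool
constexpr uint64_t kPoolOffsetMaskExample = kPoolSizeExample - 1;
constexpr uint64_t kPoolBaseMaskExample =
    ~kPoolOffsetMaskExample & kMemTagUnmaskExample;

constexpr uint64_t kPoolBaseExample = 0x0000'4000'0000'0000ULL;  // pool-aligned
// A pointer into that pool whose top byte holds an MTE tag (0x0b here):
constexpr uint64_t kTaggedAddressExample = 0x0b00'4000'0000'1234ULL;

// Masking strips both the offset bits and the tag byte, so the tagged address
// is still recognized as belonging to its pool.
static_assert((kTaggedAddressExample & kPoolBaseMaskExample) == kPoolBaseExample,
              "tagged pointer maps back to its pool base");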
// This must be set to such a value that IsIn*Pool() always returns false when

View File

@ -12,7 +12,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
// Prefetch *x into memory.
@ -54,7 +53,7 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
static thread_local uint8_t counter = 0;
if (PA_UNLIKELY(counter == 0)) {
// It's OK to truncate this value.
counter = static_cast<uint8_t>(RandomValue());
counter = static_cast<uint8_t>(::partition_alloc::internal::RandomValue());
}
// If `counter` is 0, this will wrap. That is intentional and OK.
counter--;
@ -62,22 +61,17 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
}
#endif // !BUILDFLAG(PA_DCHECK_IS_ON)
PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
return UntagPtr(ptr);
}
PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(const void* object) {
// TODO(bartekn): Check that |object| is indeed an object start.
return ObjectInnerPtr2Addr(object);
}
PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return TagAddr(slot_start);
}
PA_ALWAYS_INLINE uintptr_t SlotStartPtr2Addr(const void* slot_start) {
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return UntagPtr(slot_start);
}
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::SecureMemset;
#if !BUILDFLAG(PA_DCHECK_IS_ON)
using ::partition_alloc::internal::RandomPeriod;
#endif // !BUILDFLAG(PA_DCHECK_IS_ON)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

View File

@ -70,29 +70,6 @@ void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
"System pages per slot span must be no greater than 16.");
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
STATIC_ASSERT_OR_PA_CHECK(
internal::GetPartitionRefCountIndexMultiplierShift() <
std::numeric_limits<size_t>::max() / 2,
"Calculation in GetPartitionRefCountIndexMultiplierShift() must not "
"underflow.");
// Check that the GetPartitionRefCountIndexMultiplierShift() calculation is
// correct.
STATIC_ASSERT_OR_PA_CHECK(
(1 << internal::GetPartitionRefCountIndexMultiplierShift()) ==
(internal::SystemPageSize() /
(sizeof(internal::PartitionRefCount) *
(internal::kSuperPageSize / internal::SystemPageSize()))),
"Bitshift must match the intended multiplication.");
STATIC_ASSERT_OR_PA_CHECK(
((sizeof(internal::PartitionRefCount) *
(internal::kSuperPageSize / internal::SystemPageSize()))
<< internal::GetPartitionRefCountIndexMultiplierShift()) <=
internal::SystemPageSize(),
"PartitionRefCount Bitmap size must be smaller than or equal to "
"<= SystemPageSize().");
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
PA_DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory;
}

View File

@ -11,36 +11,6 @@
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_root.h"
// *** HOUSEKEEPING RULES ***
//
// Throughout PartitionAlloc code, we avoid using generic variable names like
// |ptr| or |address|, and prefer names like |object|, |slot_start|, instead.
// This helps emphasize that terms like "object" and "slot" represent two
// different worlds. "Slot" is an indivisible allocation unit, internal to
// PartitionAlloc. It is generally represented as an address (uintptr_t), since
// arithmetic operations on it aren't uncommon, and for that reason it isn't
// MTE-tagged either. "Object" is the allocated memory that the app is given via
// interfaces like Alloc(), Free(), etc. An object is fully contained within a
// slot, and may be surrounded by internal PartitionAlloc structures or empty
// space. It is generally represented as a pointer to its beginning (most
// commonly void*), and is MTE-tagged so it's safe to access.
//
// The best way to transition between these two worlds is via
// PartitionRoot::ObjectToSlotStart() and ::SlotStartToObject(). These take care
// of shifting between slot/object start, MTE-tagging/untagging and the cast for
// you. There are cases where these functions are insufficient. Internal
// PartitionAlloc structures, like free-list pointers, BRP ref-count, cookie,
// etc. are located in-slot, thus accessing them requires an MTE tag.
// SlotStartPtr2Addr() and SlotStartAddr2Ptr() take care of this.
// There are cases where we have to do pointer arithmetic on an object pointer
// (like checking whether it belongs to a pool, etc.), in which case we want to
// strip the MTE tag. ObjectInnerPtr2Addr() and ObjectPtr2Addr() take care of that.
//
// Avoid using UntagPtr/Addr() and TagPtr/Addr() directly, if possible. And
// definitely avoid using reinterpret_cast between uintptr_t and pointer worlds.
// When you do, add a comment explaining why it's safe from the point of view
// of MTE tagging.
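A self-contained toy version of that discipline might look like the following; ObjectToSlotAddr, SlotAddrToObject, kTagMaskExample and the extras size are made up for illustration and are not the real PartitionAlloc helpers. The point is that tag handling and the uintptr_t casts live in exactly one place.

#include <cstddef>
#include <cstdint>

// Illustrative constants; the real layout is derived from the partition config.
constexpr uintptr_t kTagMaskExample = 0xff00'0000'0000'0000ULL;
constexpr std::size_t kExtrasBeforeObjectExample = 16;  // e.g. ref-count + cookie

// Object world -> slot world: strip the MTE tag, then walk back over the
// in-slot extras to reach the (untagged) slot start address.
inline uintptr_t ObjectToSlotAddr(const void* object) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(object) & ~kTagMaskExample;
  return addr - kExtrasBeforeObjectExample;
}

// Slot world -> object world: skip the extras, then re-apply the tag so the
// returned pointer is safe to dereference on an MTE-enabled device.
inline void* SlotAddrToObject(uintptr_t slot_start, uintptr_t tag) {
  uintptr_t addr = slot_start + kExtrasBeforeObjectExample;
  return reinterpret_cast<void*>(addr | tag);
}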
namespace partition_alloc {
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
@ -73,4 +43,14 @@ using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionAllocator;
using ::partition_alloc::PartitionAllocGlobalInit;
using ::partition_alloc::PartitionAllocGlobalUninitForTesting;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_

View File

@ -17,6 +17,10 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
#include <intrin.h>
#endif
namespace partition_alloc::internal::base::bits {
// Returns true iff |value| is a power of 2.
@ -71,6 +75,85 @@ inline T* AlignUp(T* ptr, size_t alignment) {
//
// C does not have an operator to do this, but fortunately the various
// compilers have built-ins that map to fast underlying processor instructions.
//
// Prefer the clang path on Windows, as _BitScanReverse() and friends are not
// constexpr.
#if defined(COMPILER_MSVC) && !defined(__clang__)
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
int>::type
CountLeadingZeroBits(T x) {
static_assert(bits > 0, "invalid instantiation");
unsigned long index;
return PA_LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
? (31 - index - (32 - bits))
: bits;
}
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
int>::type
CountLeadingZeroBits(T x) {
static_assert(bits > 0, "invalid instantiation");
unsigned long index;
// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
return PA_LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
? (63 - index)
: 64;
#else
uint32_t left = static_cast<uint32_t>(x >> 32);
if (PA_LIKELY(_BitScanReverse(&index, left)))
return 31 - index;
uint32_t right = static_cast<uint32_t>(x);
if (PA_LIKELY(_BitScanReverse(&index, right)))
return 63 - index;
return 64;
#endif
}
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
int>::type
CountTrailingZeroBits(T x) {
static_assert(bits > 0, "invalid instantiation");
unsigned long index;
return PA_LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
: bits;
}
template <typename T, int bits = sizeof(T) * 8>
PA_ALWAYS_INLINE
typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
int>::type
CountTrailingZeroBits(T x) {
static_assert(bits > 0, "invalid instantiation");
unsigned long index;
// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
return PA_LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
: 64;
#else
uint32_t right = static_cast<uint32_t>(x);
if (PA_LIKELY(_BitScanForward(&index, right)))
return index;
uint32_t left = static_cast<uint32_t>(x >> 32);
if (PA_LIKELY(_BitScanForward(&index, left)))
return 32 + index;
return 64;
#endif
}
#elif defined(COMPILER_GCC) || defined(__clang__)
// __builtin_clz has undefined behaviour for an input of 0, even though there's
// clearly a return value that makes sense, and even though some processor clz
// instructions have defined behaviour for 0. We could drop to raw __asm__ to
@ -99,6 +182,8 @@ PA_ALWAYS_INLINE constexpr
: bits;
}
#endif
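As a quick sanity check of the identity the next helper relies on (the integer i such that 2^i <= n < 2^(i+1)), here is a standalone illustration assuming a GCC/clang toolchain; Log2FloorExample is a made-up name, and __builtin_clzll belongs to the same family of builtins the non-MSVC branch above wraps.

#include <cstdint>

constexpr int Log2FloorExample(uint64_t n) {
  // For n > 0, 63 - clz(n) is the index of the highest set bit, i.e. the
  // integer i with 2^i <= n < 2^(i+1).
  return 63 - __builtin_clzll(n);
}

static_assert(Log2FloorExample(1) == 0, "2^0 <= 1 < 2^1");
static_assert(Log2FloorExample(4096) == 12, "2^12 <= 4096 < 2^13");
static_assert(Log2FloorExample(4097) == 12, "4097 is still below 2^13");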
// Returns the integer i such that 2^i <= n < 2^(i+1).
//
// There is a common `BitLength` function, which returns the number of bits

View File

@ -10,7 +10,7 @@
// on build time. Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 370000
#pragma clang max_tokens_here 340000
#endif
#include <string.h>

View File

@ -9,6 +9,12 @@ namespace base {
class LapTimer;
template <typename Type, typename Traits>
class LazyInstance;
template <typename Type>
struct LazyInstanceTraitsBase;
} // namespace base
namespace partition_alloc::internal::base {
@ -16,6 +22,8 @@ namespace partition_alloc::internal::base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::LapTimer;
using ::base::LazyInstance;
using ::base::LazyInstanceTraitsBase;
} // namespace partition_alloc::internal::base

View File

@ -23,7 +23,7 @@
namespace partition_alloc::internal::base {
// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
inline constexpr uint16_t ByteSwap(uint16_t x) {
inline uint16_t ByteSwap(uint16_t x) {
#if defined(COMPILER_MSVC) && !defined(__clang__)
return _byteswap_ushort(x);
#else
@ -31,7 +31,7 @@ inline constexpr uint16_t ByteSwap(uint16_t x) {
#endif
}
inline constexpr uint32_t ByteSwap(uint32_t x) {
inline uint32_t ByteSwap(uint32_t x) {
#if defined(COMPILER_MSVC) && !defined(__clang__)
return _byteswap_ulong(x);
#else
@ -69,21 +69,21 @@ inline constexpr uintptr_t ByteSwapUintPtrT(uintptr_t x) {
// Converts the bytes in |x| from host order (endianness) to little endian, and
// returns the result.
inline constexpr uint16_t ByteSwapToLE16(uint16_t x) {
inline uint16_t ByteSwapToLE16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return x;
#else
return ByteSwap(x);
#endif
}
inline constexpr uint32_t ByteSwapToLE32(uint32_t x) {
inline uint32_t ByteSwapToLE32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return x;
#else
return ByteSwap(x);
#endif
}
inline constexpr uint64_t ByteSwapToLE64(uint64_t x) {
inline uint64_t ByteSwapToLE64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return x;
#else
@ -93,21 +93,21 @@ inline constexpr uint64_t ByteSwapToLE64(uint64_t x) {
// Converts the bytes in |x| from network to host order (endianness), and
// returns the result.
inline constexpr uint16_t NetToHost16(uint16_t x) {
inline uint16_t NetToHost16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else
return x;
#endif
}
inline constexpr uint32_t NetToHost32(uint32_t x) {
inline uint32_t NetToHost32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else
return x;
#endif
}
inline constexpr uint64_t NetToHost64(uint64_t x) {
inline uint64_t NetToHost64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else
@ -117,21 +117,21 @@ inline constexpr uint64_t NetToHost64(uint64_t x) {
// Converts the bytes in |x| from host to network order (endianness), and
// returns the result.
inline constexpr uint16_t HostToNet16(uint16_t x) {
inline uint16_t HostToNet16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else
return x;
#endif
}
inline constexpr uint32_t HostToNet32(uint32_t x) {
inline uint32_t HostToNet32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else
return x;
#endif
}
inline constexpr uint64_t HostToNet64(uint64_t x) {
inline uint64_t HostToNet64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
return ByteSwap(x);
#else

View File

@ -12,7 +12,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <memory>
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h"

View File

@ -10,7 +10,7 @@
// time.h is a widely included header and its size impacts build time.
// Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#pragma clang max_tokens_here 490000
#pragma clang max_tokens_here 390000
#endif // BUILDFLAG(IS_LINUX)
#include <atomic>

View File

@ -327,7 +327,7 @@ DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
// "rollover" counter.
union LastTimeAndRolloversState {
// The state as a single 32-bit opaque value.
std::atomic<int32_t> as_opaque_32{0};
std::atomic<int32_t> as_opaque_32;
// The state as usable values.
struct {

View File

@ -27,30 +27,35 @@ namespace partition_alloc {
// Bit flag constants used as `flag` argument of PartitionRoot::AllocWithFlags,
// AlignedAllocWithFlags, etc.
struct AllocFlags {
static constexpr unsigned int kReturnNull = 1 << 0;
static constexpr unsigned int kZeroFill = 1 << 1;
// In order to support bit operations like `flag_a | flag_b`, the old-
// fashioned enum (+ surrounding named struct) is used instead of enum class.
enum : unsigned int {
kReturnNull = 1 << 0,
kZeroFill = 1 << 1,
// Don't allow allocation override hooks. Override hooks are expected to
// check for the presence of this flag and return false if it is active.
static constexpr unsigned int kNoOverrideHooks = 1 << 2;
kNoOverrideHooks = 1 << 2,
// Never let a memory tool like ASan (if active) perform the allocation.
static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
kNoMemoryToolOverride = 1 << 3,
// Don't allow any hooks (override or observers).
static constexpr unsigned int kNoHooks = 1 << 4; // Internal.
kNoHooks = 1 << 4, // Internal only.
// If the allocation requires a "slow path" (such as allocating/committing a
// new slot span), return nullptr instead. Note this makes all large
// allocations return nullptr, such as direct-mapped ones, and even for
// smaller ones, a nullptr value is common.
static constexpr unsigned int kFastPathOrReturnNull = 1 << 5; // Internal.
kFastPathOrReturnNull = 1 << 5, // Internal only.
static constexpr unsigned int kLastFlag = kFastPathOrReturnNull;
kLastFlag = kFastPathOrReturnNull
};
};
// Bit flag constants used as `flag` argument of PartitionRoot::FreeWithFlags.
struct FreeFlags {
// See AllocFlags::kNoMemoryToolOverride.
static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;
enum : unsigned int {
kNoMemoryToolOverride = 1 << 0, // See AllocFlags::kNoMemoryToolOverride.
static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
kLastFlag = kNoMemoryToolOverride
};
};
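Because both variants keep the flags as plain unsigned-int constants (whether as enumerators or as static constexpr members), call sites can combine and test them with ordinary bitwise operators. A self-contained sketch of that pattern (DemoAllocFlags is a made-up stand-in, not the real type):

// Enumerators nested in a named struct compose with | and & as plain
// unsigned ints, with no operator overloads needed (an enum class would
// require them).
struct DemoAllocFlags {
  enum : unsigned int {
    kReturnNull = 1 << 0,
    kZeroFill = 1 << 1,
    kNoOverrideHooks = 1 << 2,
  };
};

int main() {
  unsigned int flags = DemoAllocFlags::kReturnNull | DemoAllocFlags::kZeroFill;
  bool hooks_allowed = !(flags & DemoAllocFlags::kNoOverrideHooks);  // true
  return (flags & DemoAllocFlags::kZeroFill) && hooks_allowed ? 0 : 1;
}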
namespace internal {
@ -240,7 +245,7 @@ constexpr size_t kSuperPageShift = 21; // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask & kMemTagUnmask;
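A hedged numeric sketch of the mask arithmetic above, ignoring the memory-tag bits (all Demo names and the sample address are made up):

#include <cstddef>
#include <cstdint>

// Splitting an address into super-page base and intra-super-page offset.
constexpr std::size_t kDemoSuperPageShift = 21;  // 2 MiB
constexpr std::size_t kDemoSuperPageSize = std::size_t{1} << kDemoSuperPageShift;
constexpr std::size_t kDemoOffsetMask = kDemoSuperPageSize - 1;  // 0x1FFFFF
constexpr std::size_t kDemoBaseMask = ~kDemoOffsetMask;

constexpr uintptr_t kDemoAddress = 0x12345678;
static_assert((kDemoAddress & kDemoBaseMask) == 0x12200000, "super page base");
static_assert((kDemoAddress & kDemoOffsetMask) == 0x145678, "offset within it");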
// GigaCage is split into two pools, one which supports BackupRefPtr (BRP) and
// one that doesn't.
@ -278,14 +283,14 @@ static constexpr pool_handle kConfigurablePoolHandle = 3;
constexpr size_t kMaxMemoryTaggingSize = 1024;
#if defined(PA_HAS_MEMORY_TAGGING)
// Returns whether the tag of |object| overflowed, meaning the containing slot
// needs to be moved to quarantine.
// Returns whether the tag of |object| overflowed and the containing slot needs
// to be moved to quarantine.
PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
// The tag with which the slot is put to quarantine.
constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
static_assert((kOverflowTag & kPtrTagMask) != 0,
static_assert((kOverflowTag & ~kMemTagUnmask) != 0,
"Overflow tag must be in tag bits");
return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
return (reinterpret_cast<uintptr_t>(object) & ~kMemTagUnmask) == kOverflowTag;
}
#endif // defined(PA_HAS_MEMORY_TAGGING)

View File

@ -25,9 +25,16 @@ namespace internal {
// the second one 16. We could technically return something different for
// malloc() and operator new(), but this would complicate things, and most of
// our allocations are presumably coming from operator new() anyway.
//
// __STDCPP_DEFAULT_NEW_ALIGNMENT__ is C++17. As such, it is not defined on all
// platforms, as Chrome's requirement is C++14 as of 2020.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
constexpr size_t kAlignment =
std::max(alignof(max_align_t),
static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
#else
constexpr size_t kAlignment = alignof(max_align_t);
#endif
static_assert(kAlignment <= 16,
"PartitionAlloc doesn't support a fundamental alignment larger "
"than 16 bytes.");
@ -55,6 +62,14 @@ using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionRoot;
} // namespace base
// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
//
// The malloc attribute indicates that the function acts like a system memory

View File

@ -82,4 +82,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionAllocHooks;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_

View File

@ -803,7 +803,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
PageAccessibilityConfiguration::kReadWrite,
PageAccessibilityDisposition::kRequireUpdate);
}
PCScan::RegisterNewSuperPage(root, super_page);
::base::internal::PCScan::RegisterNewSuperPage(root, super_page);
}
return payload;
@ -883,8 +883,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
}
if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, size);
// Ensure the memory tag of the return_slot is unguessable.
return_slot =
::partition_alloc::internal::TagMemoryRangeRandomly(return_slot, size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
@ -895,20 +896,14 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
uintptr_t next_slot_end = next_slot + size;
size_t free_list_entries_added = 0;
while (next_slot_end <= commit_end) {
void* next_slot_ptr;
if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
// Ensure the MTE-tag of the memory pointed by other provisioned slot is
// unguessable. They will be returned to the app as is, and the MTE-tag
// will only change upon calling Free().
next_slot_ptr = TagMemoryRangeRandomly(next_slot, size);
} else {
// No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot);
next_slot =
::partition_alloc::internal::TagMemoryRangeRandomly(next_slot, size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot);
if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry);
PA_DCHECK(!free_list_entries_added);

View File

@ -202,4 +202,12 @@ struct PartitionBucket {
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionBucket;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

View File

@ -270,4 +270,12 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::BucketIndexLookup;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_

View File

@ -44,4 +44,18 @@ PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {}
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::kCookieSize;
using ::partition_alloc::internal::kPartitionCookieSizeAdjustment;
using ::partition_alloc::internal::PartitionCookieCheckValue;
using ::partition_alloc::internal::PartitionCookieWriteValue;
#if BUILDFLAG(PA_DCHECK_IS_ON)
using ::partition_alloc::internal::kCookieValue;
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_

View File

@ -72,4 +72,13 @@ PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionDirectMapExtent;
using ::partition_alloc::internal::PartitionDirectMapMetadata;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_

View File

@ -41,7 +41,6 @@ class EncodedPartitionFreelistEntryPtr {
std::nullptr_t)
: encoded_(Transform(0)) {}
explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
// The encoded pointer stays MTE-tagged.
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
@ -117,15 +116,11 @@ class PartitionFreelistEntry {
// Emplaces the freelist entry at the beginning of the given slot span, and
// initializes it as null-terminated.
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
void* slot_start_tagged) {
// |slot_start_tagged| is MTE-tagged.
auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
return entry;
}
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
uintptr_t slot_start) {
return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
auto* entry = new (reinterpret_cast<void*>(slot_start))
PartitionFreelistEntry(nullptr);
return entry;
}
// Emplaces the freelist entry at the beginning of the given slot span, and
@ -138,7 +133,7 @@ class PartitionFreelistEntry {
uintptr_t slot_start,
PartitionFreelistEntry* next) {
auto* entry =
new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
new (reinterpret_cast<void*>(slot_start)) PartitionFreelistEntry(next);
return entry;
}
@ -150,7 +145,7 @@ class PartitionFreelistEntry {
static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
void* next,
bool make_shadow_match) {
new (SlotStartAddr2Ptr(slot_start))
new (reinterpret_cast<void*>(slot_start))
PartitionFreelistEntry(next, make_shadow_match);
}
@ -179,7 +174,7 @@ class PartitionFreelistEntry {
}
}
PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
// SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the
// size.
@ -187,14 +182,15 @@ class PartitionFreelistEntry {
// Regular freelists always point to an entry within the same super page.
//
// This is most likely a PartitionAlloc bug if this triggers.
if (PA_UNLIKELY(entry &&
(SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
(SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
if (PA_UNLIKELY(
ptr &&
(reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
(reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
FreelistCorruptionDetected(0);
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
encoded_next_ = EncodedPartitionFreelistEntryPtr(ptr);
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
shadow_ = encoded_next_.Inverted();
#endif
@ -208,7 +204,8 @@ class PartitionFreelistEntry {
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
shadow_ = 0;
#endif
return SlotStartPtr2Addr(this);
uintptr_t slot_start = reinterpret_cast<uintptr_t>(this);
return slot_start;
}
PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
@ -232,8 +229,8 @@ class PartitionFreelistEntry {
//
// Also, the lightweight UaF detection (pointer shadow) is checked.
uintptr_t here_address = SlotStartPtr2Addr(here);
uintptr_t next_address = SlotStartPtr2Addr(next);
uintptr_t here_address = reinterpret_cast<uintptr_t>(here);
uintptr_t next_address = reinterpret_cast<uintptr_t>(next);
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
@ -330,4 +327,12 @@ PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionFreelistEntry;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

View File

@ -37,4 +37,24 @@ extern OomFunction g_oom_handling_function;
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::OomFunction;
namespace internal {
using ::partition_alloc::internal::g_oom_handling_function;
using ::partition_alloc::internal::PartitionExcessiveAllocationSize;
#if !defined(ARCH_CPU_64_BITS)
using ::partition_alloc::internal::PartitionOutOfMemoryWithLargeVirtualSize;
using ::partition_alloc::internal::
PartitionOutOfMemoryWithLotsOfUncommitedPages;
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_

View File

@ -263,7 +263,9 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
for (PartitionFreelistEntry* head = freelist_head; head;
head = head->GetNext(slot_size)) {
++num_free_slots;
size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
size_t offset_in_slot_span = ::partition_alloc::internal::UnmaskPtr(
reinterpret_cast<uintptr_t>(head)) -
slot_span_start;
size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
PA_DCHECK(slot_number < num_provisioned_slots);
free_slots[slot_number] = true;
@ -278,8 +280,9 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
for (size_t slot_number = 0; slot_number < num_provisioned_slots;
slot_number++) {
if (free_slots[slot_number]) {
uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
uintptr_t slot_address = ::partition_alloc::internal::RemaskPtr(
slot_span_start + (slot_size * slot_number));
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_address);
if (!head)
head = entry;

View File

@ -502,6 +502,7 @@ SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
// surely never contain user data.
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
bool with_quarantine) {
address = ::partition_alloc::internal::UnmaskPtr(address);
// Quarantine can only be enabled for normal buckets in the current code.
PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
uintptr_t super_page = address & kSuperPageBaseMask;
@ -580,6 +581,7 @@ PA_ALWAYS_INLINE uintptr_t SlotSpanMetadata<thread_safe>::ToSlotSpanStart(
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromAddr(uintptr_t address) {
address = ::partition_alloc::internal::UnmaskPtr(address);
auto* page = PartitionPage<thread_safe>::FromAddr(address);
PA_DCHECK(page->is_valid);
// Partition pages in the same slot span share the same SlotSpanMetadata
@ -609,7 +611,9 @@ SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Checks that the pointer is a multiple of slot size.
uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
PA_DCHECK(!((::partition_alloc::internal::UnmaskPtr(slot_start) -
::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
slot_span->bucket->slot_size));
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
return slot_span;
}
@ -621,14 +625,16 @@ SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromObject(void* object) {
uintptr_t object_addr = ObjectPtr2Addr(object);
uintptr_t object_addr = PartitionRoot<thread_safe>::ObjectPtr2Addr(object);
auto* slot_span = FromAddr(object_addr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Checks that the object is exactly |extras_offset| away from a multiple of
// slot size (i.e. from a slot start).
uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
PA_DCHECK((::partition_alloc::internal::UnmaskPtr(object_addr) -
::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
slot_span->bucket->slot_size ==
root->flags.extras_offset);
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
return slot_span;
@ -659,7 +665,8 @@ SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromObjectInnerPtr(void* ptr) {
return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
return FromObjectInnerAddr(
PartitionRoot<thread_safe>::ObjectInnerPtr2Addr(ptr));
}
template <bool thread_safe>
@ -681,14 +688,9 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
PartitionFreelistEntry* new_head) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
// |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
// as well.
uintptr_t new_head_untagged = UntagPtr(new_head);
PA_DCHECK(!new_head ||
(reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
(new_head_untagged & kSuperPageBaseMask));
#endif
(reinterpret_cast<uintptr_t>(new_head) & kSuperPageBaseMask));
freelist_head = new_head;
// Inserted something new in the freelist, assume that it is not sorted
// anymore.
@ -718,8 +720,7 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(uintptr_t slot_start)
root->lock_.AssertAcquired();
#endif
auto* entry = static_cast<internal::PartitionFreelistEntry*>(
SlotStartAddr2Ptr(slot_start));
auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(slot_start);
// Catches an immediate double free.
PA_CHECK(entry != freelist_head);
// Look for double free one level deeper in debug.
@ -761,10 +762,11 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
size_t number_of_entries = 0;
for (auto* entry = head; entry;
entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
uintptr_t untagged_entry = UntagPtr(entry);
uintptr_t unmasked_entry = ::partition_alloc::internal::UnmaskPtr(
reinterpret_cast<uintptr_t>(entry));
// Check that all entries belong to this slot span.
PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
PA_DCHECK(untagged_entry <
PA_DCHECK(ToSlotSpanStart(this) <= unmasked_entry);
PA_DCHECK(unmasked_entry <
ToSlotSpanStart(this) + bucket->get_bytes_per_span());
}
PA_DCHECK(number_of_entries == number_of_freed);
@ -900,4 +902,23 @@ void IterateSlotSpans(uintptr_t super_page,
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::AllocationStateMap;
using ::partition_alloc::internal::CommittedStateBitmapSize;
using ::partition_alloc::internal::IterateSlotSpans;
using ::partition_alloc::internal::PartitionPage;
using ::partition_alloc::internal::PartitionSuperPageExtentEntry;
using ::partition_alloc::internal::PartitionSuperPageToExtent;
using ::partition_alloc::internal::PartitionSuperPageToMetadataArea;
using ::partition_alloc::internal::ReservedStateBitmapSize;
using ::partition_alloc::internal::SlotSpanMetadata;
using ::partition_alloc::internal::StateBitmapFromAddr;
using ::partition_alloc::internal::SuperPageStateBitmap;
using ::partition_alloc::internal::SuperPageStateBitmapAddr;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

View File

@ -319,49 +319,19 @@ static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
constexpr size_t kPartitionRefCountIndexMultiplier =
SystemPageSize() /
(sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()));
#if defined(PA_REF_COUNT_CHECK_COOKIE) || \
defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#else // defined(PA_REF_COUNT_CHECK_COOKIE) ||
// defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#endif // defined(PA_REF_COUNT_CHECK_COOKIE) ||
// defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
#else // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if defined(PA_REF_COUNT_CHECK_COOKIE) && \
defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#elif defined(PA_REF_COUNT_CHECK_COOKIE) || \
defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#else
static constexpr size_t kPartitionRefCountSizeShift = 2;
#endif
#endif // defined(PA_REF_COUNT_CHECK_COOKIE)
static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
// We need one PartitionRefCount for each system page in a super page. They take
// `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())` space.
// They need to fit into a system page of metadata as sparsely as possible to
// minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetPartitionRefCountIndexMultiplierShift() {
return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
}
static_assert((sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()) *
kPartitionRefCountIndexMultiplier <=
SystemPageSize()),
"PartitionRefCount Bitmap size must be smaller than or equal to "
"<= SystemPageSize().");
PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
uintptr_t slot_start) {
PA_DCHECK(slot_start == ::partition_alloc::internal::RemaskPtr(slot_start));
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
CheckThatSlotOffsetIsZero(slot_start);
#endif
@ -370,20 +340,17 @@ PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
#endif
// Have to MTE-tag, because the address is untagged, but lies within a slot
// area, which is protected by MTE.
//
// There could be a race condition though if the previous slot is
// freed/retagged concurrently, so ideally the ref count should occupy its
// own MTE granule.
// Have to remask because the previous pointer's tag is unpredictable. There
// could be a race condition though if the previous slot is freed/retagged
// concurrently, so ideally the ref count should occupy its own MTE granule.
// TODO(richard.townsend@arm.com): improve this.
return static_cast<PartitionRefCount*>(TagAddr(refcount_address));
return ::partition_alloc::internal::RemaskPtr(
reinterpret_cast<PartitionRefCount*>(refcount_address));
} else {
// No need to tag, as the metadata region isn't protected by MTE.
PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
(slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
<< GetPartitionRefCountIndexMultiplierShift();
size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift()) *
kPartitionRefCountIndexMultiplier;
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
#endif
@ -408,9 +375,7 @@ PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
CheckThatSlotOffsetIsZero(slot_start);
#endif
// Have to MTE-tag, because the address is untagged, but lies within a slot
// area, which is protected by MTE.
return static_cast<PartitionRefCount*>(TagAddr(slot_start));
return reinterpret_cast<PartitionRefCount*>(slot_start);
}
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
@ -429,4 +394,19 @@ constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
using ::partition_alloc::internal::kPartitionPastAllocationAdjustment;
using ::partition_alloc::internal::PartitionRefCount;
using ::partition_alloc::internal::PartitionRefCountPointer;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
using ::partition_alloc::internal::kInSlotRefCountBufferSize;
using ::partition_alloc::internal::kPartitionRefCountOffsetAdjustment;
using ::partition_alloc::internal::kPartitionRefCountSizeAdjustment;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_

View File

@ -337,8 +337,10 @@ static size_t PartitionPurgeSlotSpan(
// slots are not in use.
for (PartitionFreelistEntry* entry = slot_span->get_freelist_head(); entry;
/**/) {
size_t slot_index =
(SlotStartPtr2Addr(entry) - slot_span_start) / slot_size;
size_t slot_index = (::partition_alloc::internal::UnmaskPtr(
reinterpret_cast<uintptr_t>(entry)) -
slot_span_start) /
slot_size;
PA_DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
#if !BUILDFLAG(IS_WIN)
@ -477,11 +479,12 @@ static void PartitionDumpSlotSpanStats(
if (slot_span->CanStoreRawSize()) {
stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
stats_out->active_count += 1;
} else {
stats_out->active_bytes +=
(slot_span->num_allocated_slots * stats_out->bucket_slot_size);
}
stats_out->active_count += slot_span->num_allocated_slots;
}
size_t slot_span_bytes_resident = RoundUpToSystemPage(
(bucket_num_slots - slot_span->num_unprovisioned_slots) *
@ -681,10 +684,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
flags.brp_enabled_ =
opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
flags.brp_zapping_enabled_ =
opts.backup_ref_ptr_zapping ==
PartitionOptions::BackupRefPtrZapping::kEnabled;
PA_CHECK(!flags.brp_zapping_enabled_ || flags.brp_enabled_);
#else
PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
#endif
@ -829,7 +828,6 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
internal::SlotSpanMetadata<thread_safe>* slot_span,
size_t requested_size) {
PA_DCHECK(slot_span->bucket->is_direct_mapped());
// Slot-span metadata isn't MTE-tagged.
PA_DCHECK(
internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
@ -923,7 +921,8 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
#if BUILDFLAG(PA_DCHECK_IS_ON)
// Write a new trailing cookie.
if (flags.allow_cookie) {
auto* object = static_cast<unsigned char*>(SlotStartToObject(slot_start));
auto* object =
reinterpret_cast<unsigned char*>(SlotStartToObject(slot_start));
internal::PartitionCookieWriteValue(object +
slot_span->GetUsableSize(this));
}
@ -934,17 +933,17 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
template <bool thread_safe>
bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
void* object,
void* ptr,
SlotSpan* slot_span,
size_t new_size) {
uintptr_t slot_start = ObjectToSlotStart(object);
PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
PA_DCHECK(internal::IsManagedByNormalBuckets(address));
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
if (AllocationCapacityFromRequestedSize(new_size) !=
AllocationCapacityFromPtr(object))
AllocationCapacityFromPtr(ptr))
return false;
// Trying to allocate |new_size| would use the same amount of underlying
@ -952,6 +951,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
// statistics (and cookie, if present).
if (slot_span->CanStoreRawSize()) {
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
uintptr_t slot_start = ObjectToSlotStart(ptr);
internal::PartitionRefCount* old_ref_count;
if (brp_enabled()) {
old_ref_count = internal::PartitionRefCountPointer(slot_start);
@ -972,12 +972,13 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
// Write a new trailing cookie only when it is possible to keep track
// of the raw size (otherwise we wouldn't know where to look for it later).
if (flags.allow_cookie) {
internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
internal::PartitionCookieWriteValue(
reinterpret_cast<unsigned char*>(address) +
slot_span->GetUsableSize(this));
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
}
return object;
return ptr;
}
template <bool thread_safe>
@ -1175,12 +1176,6 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
total_size_of_brp_quarantined_bytes.load(std::memory_order_relaxed);
stats.total_brp_quarantined_count =
total_count_of_brp_quarantined_slots.load(std::memory_order_relaxed);
stats.cumulative_brp_quarantined_bytes =
cumulative_size_of_brp_quarantined_bytes.load(
std::memory_order_relaxed);
stats.cumulative_brp_quarantined_count =
cumulative_count_of_brp_quarantined_slots.load(
std::memory_order_relaxed);
#endif
size_t direct_mapped_allocations_total_size = 0;

View File

@ -83,14 +83,6 @@
namespace partition_alloc::internal {
// This type trait verifies a type can be used as a pointer offset.
//
// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
// Smaller types are also allowed.
template <typename Z>
static constexpr bool offset_type =
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
static constexpr size_t kAllocInfoSize = 1 << 20;
struct AllocInfo {
@ -179,11 +171,6 @@ struct PartitionOptions {
kEnabled,
};
enum class BackupRefPtrZapping : uint8_t {
kDisabled,
kEnabled,
};
enum class UseConfigurablePool : uint8_t {
kNo,
kIfAvailable,
@ -195,14 +182,12 @@ struct PartitionOptions {
Quarantine quarantine,
Cookie cookie,
BackupRefPtr backup_ref_ptr,
BackupRefPtrZapping backup_ref_ptr_zapping,
UseConfigurablePool use_configurable_pool)
: aligned_alloc(aligned_alloc),
thread_cache(thread_cache),
quarantine(quarantine),
cookie(cookie),
backup_ref_ptr(backup_ref_ptr),
backup_ref_ptr_zapping(backup_ref_ptr_zapping),
use_configurable_pool(use_configurable_pool) {}
AlignedAlloc aligned_alloc;
@ -210,7 +195,6 @@ struct PartitionOptions {
Quarantine quarantine;
Cookie cookie;
BackupRefPtr backup_ref_ptr;
BackupRefPtrZapping backup_ref_ptr_zapping;
UseConfigurablePool use_configurable_pool;
};
@ -257,7 +241,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool allow_cookie;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
bool brp_enabled_;
bool brp_zapping_enabled_;
#endif
bool use_configurable_pool;
@ -322,8 +305,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
#endif
// Slot span memory which has been provisioned, and is currently unused as
// it's part of an empty SlotSpan. This is not clean memory, since it has
@ -714,21 +695,32 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return size - flags.extras_size;
}
PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return slot_start + flags.extras_offset;
}
PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
// TODO(bartekn): Move MTE tagging here.
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return internal::TagAddr(SlotStartToObjectAddr(slot_start));
return reinterpret_cast<void*>(slot_start + flags.extras_offset);
}
PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
return UntagPtr(object) - flags.extras_offset;
// TODO(bartekn): Move MTE untagging here.
return reinterpret_cast<uintptr_t>(object) - flags.extras_offset;
// TODO(bartekn): Check that the result is indeed a slot start.
}
static PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(void* object) {
// TODO(bartekn): Add MTE untagging here.
return reinterpret_cast<uintptr_t>(object);
}
static PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(void* object) {
// TODO(bartekn): Check that |object| is indeed an object start.
return ObjectInnerPtr2Addr(object);
}
static PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
// TODO(bartekn): Move MTE tagging here.
// TODO(bartekn): Check that |slot_start| is indeed a slot start.
return reinterpret_cast<void*>(slot_start);
}
bool brp_enabled() const {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
return flags.brp_enabled_;
@ -737,14 +729,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif
}
bool brp_zapping_enabled() const {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
return flags.brp_zapping_enabled_;
#else
return false;
#endif
}
PA_ALWAYS_INLINE bool uses_configurable_pool() const {
return flags.use_configurable_pool;
}
@ -810,7 +794,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return ret;
}
// Allocates a memory slot, without initializing extras.
// Allocates memory, without initializing extras.
//
// - |flags| are as in AllocWithFlags().
// - |raw_size| accommodates for extras on top of AllocWithFlags()'s
@ -831,7 +815,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool* is_already_zeroed)
PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool TryReallocInPlaceForNormalBuckets(void* object,
bool TryReallocInPlaceForNormalBuckets(void* ptr,
SlotSpan* slot_span,
size_t new_size);
bool TryReallocInPlaceForDirectMap(
@ -914,8 +898,7 @@ PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
PartitionDirectMapMetadata<ThreadSafe>::FromSlotSpan(slot_span);
size_t padding_for_alignment =
metadata->direct_map_extent.padding_for_alignment;
PA_DCHECK(padding_for_alignment ==
static_cast<size_t>(page - first_page) * PartitionPageSize());
PA_DCHECK(padding_for_alignment == (page - first_page) * PartitionPageSize());
PA_DCHECK(slot_start ==
reservation_start + PartitionPageSize() + padding_for_alignment);
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
@ -931,6 +914,8 @@ PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
// ref-count is in place for this allocation.
PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
address = ::partition_alloc::internal::UnmaskPtr(address);
// Adjust to support pointers right past the end of an allocation, which in
// some cases appear to point outside the designated allocation slot.
//
@ -955,11 +940,14 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
// Get the offset from the beginning of the slot span.
uintptr_t slot_span_start =
SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
PA_DCHECK(slot_span_start ==
::partition_alloc::internal::UnmaskPtr(slot_span_start));
size_t offset_in_slot_span = address - slot_span_start;
auto* bucket = slot_span->bucket;
return slot_span_start +
bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
return ::partition_alloc::internal::RemaskPtr(
slot_span_start +
bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span));
}
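Conceptually the return statement rounds an in-slot address down to its slot start; a hedged numeric sketch with made-up sizes (GetSlotNumber() is treated as plain division here):

#include <cstdint>

constexpr uintptr_t kDemoSlotSpanStart = 0x10000;
constexpr uintptr_t kDemoSlotSize = 64;
constexpr uintptr_t kDemoAddress = kDemoSlotSpanStart + 200;  // inside slot 3

constexpr uintptr_t kDemoOffset = kDemoAddress - kDemoSlotSpanStart;  // 200
constexpr uintptr_t kDemoSlotNumber = kDemoOffset / kDemoSlotSize;    // 3
constexpr uintptr_t kDemoSlotStart =
    kDemoSlotSpanStart + kDemoSlotNumber * kDemoSlotSize;
static_assert(kDemoSlotStart == kDemoSlotSpanStart + 192,
              "the address is rounded down to the start of slot 3");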
// Checks whether a given address stays within the same allocation slot after
@ -967,9 +955,8 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
//
// This isn't a general purpose function. The caller is responsible for ensuring
// that the ref-count is in place for this allocation.
template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
PA_ALWAYS_INLINE bool PartitionAllocIsValidPtrDelta(uintptr_t address,
Z delta_in_bytes) {
ptrdiff_t delta_in_bytes) {
// Required for pointers right past an allocation. See
// |PartitionAllocGetSlotStartInBRPPool()|.
uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
@ -987,10 +974,11 @@ PA_ALWAYS_INLINE bool PartitionAllocIsValidPtrDelta(uintptr_t address,
// Double check that ref-count is indeed present.
PA_DCHECK(root->brp_enabled());
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
uintptr_t new_address = address + static_cast<uintptr_t>(delta_in_bytes);
void* object = root->SlotStartToObject(slot_start);
uintptr_t object_addr = PartitionRoot<ThreadSafe>::ObjectPtr2Addr(object);
uintptr_t new_address = address + delta_in_bytes;
return object_addr <= new_address &&
// We use "greater than or equal" below because we want to include
// We use "greater then or equal" below because we want to include
// pointers right past the end of an allocation.
new_address <= object_addr + slot_span->GetUsableSize(root);
}
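The boundary behaviour of this check, with made-up numbers (one-past-the-end addresses are deliberately accepted):

#include <cstdint>

// Object at 0x1000 with usable size 0x20, so [0x1000, 0x1020] is accepted.
constexpr uintptr_t kDemoObject = 0x1000;
constexpr uintptr_t kDemoUsableSize = 0x20;
constexpr bool DemoStaysInSlot(uintptr_t new_address) {
  return kDemoObject <= new_address &&
         new_address <= kDemoObject + kDemoUsableSize;
}
static_assert(DemoStaysInSlot(0x1020), "one past the end is still valid");
static_assert(!DemoStaysInSlot(0x1021), "beyond past-the-end is rejected");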
@ -1006,7 +994,7 @@ PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
// memset() can be really expensive.
#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
DebugMemset(reinterpret_cast<void*>(slot_start), kFreedByte,
slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
- sizeof(PartitionRefCount)
@ -1033,8 +1021,9 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
size_t slot_span_alignment,
size_t* usable_size,
bool* is_already_zeroed) {
PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
internal::base::bits::IsPowerOfTwo(slot_span_alignment));
PA_DCHECK(
(slot_span_alignment >= internal::PartitionPageSize()) &&
partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
SlotSpan* slot_span = bucket->active_slot_spans_head;
// There always must be a slot span on the active list (could be a sentinel).
PA_DCHECK(slot_span);
@ -1043,7 +1032,7 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
PA_DCHECK(!slot_span->marked_full);
uintptr_t slot_start =
internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
reinterpret_cast<uintptr_t>(slot_span->get_freelist_head());
// Use the fast path when a slot is readily available on the free list of the
// first active slot span. However, fall back to the slow path if a
// higher-order alignment is requested, because an inner slot of an existing
@ -1065,7 +1054,7 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
PA_DCHECK(!slot_span->CanStoreRawSize());
PA_DCHECK(!slot_span->bucket->is_direct_mapped());
void* entry = slot_span->PopForAlloc(bucket->slot_size);
PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);
PA_DCHECK(reinterpret_cast<uintptr_t>(entry) == slot_start);
PA_DCHECK(slot_span->bucket == bucket);
} else {
@ -1129,7 +1118,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
// only cases where we don't would be delayed free() in PCScan, but |*object|
// can be cold in cache.
PA_PREFETCH(object);
uintptr_t object_addr = internal::ObjectPtr2Addr(object);
uintptr_t object_addr = ObjectPtr2Addr(object);
// On Android, malloc() interception is more fragile than on other
// platforms, as we use wrapped symbols. However, the GigaCage allows us to
@ -1165,10 +1154,12 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
#if defined(PA_HAS_MEMORY_TAGGING)
const size_t slot_size = slot_span->bucket->slot_size;
if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
internal::TagMemoryRangeIncrement(slot_start, slot_size);
// TODO(bartekn): |slot_start| shouldn't have MTE tag.
slot_start = ::partition_alloc::internal::TagMemoryRangeIncrement(
slot_start, slot_size);
// Incrementing the MTE-tag in the memory range invalidates the |object|'s
// tag, so it must be retagged.
object = internal::TagPtr(object);
object = ::partition_alloc::internal::RemaskPtr(object);
}
#else
// We are going to read from |*slot_span| in all branches, but haven't done it
@ -1252,7 +1243,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
if (flags.allow_cookie) {
// Verify the cookie after the allocated region.
// If this assert fires, you probably corrupted memory.
internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
internal::PartitionCookieCheckValue(
reinterpret_cast<unsigned char*>(object) +
slot_span->GetUsableSize(this));
}
#endif
@ -1261,8 +1253,11 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// default.
if (PA_UNLIKELY(IsQuarantineEnabled())) {
if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
uintptr_t unmasked_slot_start =
::partition_alloc::internal::UnmaskPtr(slot_start);
// Mark the state in the state bitmap as freed.
internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
internal::StateBitmapFromAddr(unmasked_slot_start)
->Free(unmasked_slot_start);
}
}
@ -1274,8 +1269,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// If there are no more references to the allocation, it can be freed
// immediately. Otherwise, defer the operation and zap the memory to turn
// potential use-after-free issues into unexploitable crashes.
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
brp_zapping_enabled()))
if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs()))
internal::SecureMemset(object, internal::kQuarantinedByte,
slot_span->GetUsableSize(this));
@ -1284,10 +1278,6 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
total_count_of_brp_quarantined_slots.fetch_add(1,
std::memory_order_relaxed);
cumulative_size_of_brp_quarantined_bytes.fetch_add(
slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
cumulative_count_of_brp_quarantined_slots.fetch_add(
1, std::memory_order_relaxed);
return;
}
}
@ -1295,8 +1285,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// memset() can be really expensive.
#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
internal::kFreedByte,
internal::DebugMemset(SlotStartAddr2Ptr(slot_start), internal::kFreedByte,
slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
- sizeof(internal::PartitionRefCount)
@ -1307,7 +1296,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// efficiency.
if (PA_UNLIKELY(internal::RandomPeriod()) &&
!IsDirectMappedBucket(slot_span->bucket)) {
internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
internal::SecureMemset(SlotStartAddr2Ptr(slot_start), 0,
slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
- sizeof(internal::PartitionRefCount)
@ -1361,8 +1350,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(uintptr_t slot_start,
// RawFreeLocked()). This is intentional, as the thread cache is purged often,
// and, as a consequence, the memory has already been touched
// recently (to link the thread cache freelist).
*static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
0;
*reinterpret_cast<volatile uintptr_t*>(slot_start) = 0;
// Note: even though we write to slot_start + sizeof(void*) as well, due to
// alignment constraints, the two locations are always going to be in the same
// OS page. No need to write to the second one as well.
@ -1430,6 +1418,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeLocked(
template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
SlotSpan* slot_span) {
slot_span = ::partition_alloc::internal::UnmaskPtr(slot_span);
PartitionRoot* root = FromSlotSpan(slot_span);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
@ -1861,8 +1850,11 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
// default.
if (PA_UNLIKELY(is_quarantine_enabled)) {
if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
uintptr_t unmasked_slot_start =
::partition_alloc::internal::UnmaskPtr(slot_start);
// Mark the corresponding bits in the state bitmap as allocated.
internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
internal::StateBitmapFromAddr(unmasked_slot_start)
->Allocate(unmasked_slot_start);
}
}
@ -1965,7 +1957,8 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
// |alignment| is a power of two, but the compiler doesn't necessarily know
// that. A regular % operation is very slow, make sure to use the equivalent,
// faster form.
// No need to MTE-untag, as it doesn't change alignment.
// No need to call ObjectPtr2Addr, because MTE untagging isn't necessary, as
// it doesn't change alignment.
PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
return object;
@ -2028,12 +2021,30 @@ using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
internal::kPartitionCachelineSize,
"Padding is incorrect");
#if BUILDFLAG(USE_BACKUP_REF_PTR)
// Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionOptions;
using ::partition_alloc::PurgeFlags;
using ::partition_alloc::ThreadSafePartitionRoot;
namespace internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::ScopedSyscallTimer;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
using ::partition_alloc::internal::PartitionAllocFreeForRefCounting;
using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
using ::partition_alloc::internal::PartitionAllocIsValidPtrDelta;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_

View File

@ -59,10 +59,6 @@ struct PartitionMemoryStats {
total_brp_quarantined_bytes; // Total bytes that are quarantined by BRP.
size_t total_brp_quarantined_count; // Total number of slots that are
// quarantined by BRP.
size_t cumulative_brp_quarantined_bytes; // Cumulative bytes that are
// quarantined by BRP.
size_t cumulative_brp_quarantined_count; // Cumulative number of slots that
// are quarantined by BRP.
#endif
bool has_thread_cache;
@ -133,4 +129,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SimplePartitionStatsDumper
} // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionBucketMemoryStats;
using ::partition_alloc::PartitionMemoryStats;
using ::partition_alloc::PartitionStatsDumper;
using ::partition_alloc::SimplePartitionStatsDumper;
using ::partition_alloc::ThreadCacheStats;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_STATS_H_

View File

@ -48,15 +48,12 @@ PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
size_t offset_in_bitmap = offset_in_super_page >>
internal::tag_bitmap::kBytesPerPartitionTagShift
<< internal::tag_bitmap::kPartitionTagSizeShift;
// No need to tag, as the tag bitmap region isn't protected by MTE.
return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagPointer relates to software MTE
// (i.e. MTECheckedPtr) and it returns a pointer to the tag in memory.
return PartitionTagPointer(UntagPtr(ptr));
return PartitionTagPointer(
internal::UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
}
namespace internal {
@ -78,10 +75,7 @@ PA_ALWAYS_INLINE void PartitionTagSetValue(uintptr_t addr,
PA_ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
size_t size,
PartitionTag value) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagSetValue relates to software MTE
// (i.e. MTECheckedPtr) and it sets the in-memory tag.
PartitionTagSetValue(UntagPtr(ptr), size, value);
PartitionTagSetValue(reinterpret_cast<uintptr_t>(ptr), size, value);
}
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {

View File

@ -34,7 +34,7 @@ using PartitionTlsKey = pthread_key_t;
#if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
namespace {
PA_ALWAYS_INLINE void* FastTlsGet(PartitionTlsKey index) {
PA_ALWAYS_INLINE void* FastTlsGet(intptr_t index) {
// On macOS, pthread_getspecific() is in libSystem, so a call to it has to go
// through PLT. However, and contrary to some other platforms, *all* TLS keys
// are in a static array in the thread structure. So they are *always* at a
@ -53,10 +53,7 @@ PA_ALWAYS_INLINE void* FastTlsGet(PartitionTlsKey index) {
// This function is essentially inlining the content of pthread_getspecific()
// here.
intptr_t result;
static_assert(sizeof index <= sizeof(intptr_t));
asm("movq %%gs:(,%1,8), %0;"
: "=r"(result)
: "r"(static_cast<intptr_t>(index)));
asm("movq %%gs:(,%1,8), %0;" : "=r"(result) : "r"(index));
return reinterpret_cast<void*>(result);
}
@ -143,4 +140,18 @@ PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionTlsCreate;
using ::partition_alloc::internal::PartitionTlsGet;
using ::partition_alloc::internal::PartitionTlsKey;
using ::partition_alloc::internal::PartitionTlsSet;
#if BUILDFLAG(IS_WIN)
using ::partition_alloc::internal::PartitionTlsSetOnDllProcessDetach;
#endif // BUILDFLAG(IS_WIN)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_

View File

@ -146,6 +146,7 @@ PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, find the owning Pool and compute the offset from its base.
address = ::partition_alloc::internal::UnmaskPtr(address);
auto [pool, offset] = GetPoolAndOffset(address);
return ReservationOffsetPointer(pool, offset);
#else
@ -253,4 +254,21 @@ PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
} // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::GetDirectMapReservationStart;
using ::partition_alloc::internal::GetReservationOffsetTable;
using ::partition_alloc::internal::GetReservationOffsetTableEnd;
using ::partition_alloc::internal::IsManagedByDirectMap;
using ::partition_alloc::internal::IsManagedByNormalBuckets;
using ::partition_alloc::internal::IsManagedByNormalBucketsOrDirectMap;
using ::partition_alloc::internal::IsReservationStart;
using ::partition_alloc::internal::kOffsetTagNormalBuckets;
using ::partition_alloc::internal::kOffsetTagNotAllocated;
using ::partition_alloc::internal::ReservationOffsetPointer;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_

View File

@ -17,7 +17,6 @@ constexpr PartitionOptions kConfig{
PartitionOptions::Quarantine::kDisallowed,
PartitionOptions::Cookie::kAllowed,
PartitionOptions::BackupRefPtr::kDisabled,
PartitionOptions::BackupRefPtrZapping::kDisabled,
PartitionOptions::UseConfigurablePool::kNo,
};
} // namespace

View File

@ -80,4 +80,16 @@ struct PCScanMetadataDeleter final {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::AllocatedOnPCScanMetadataPartition;
using ::partition_alloc::internal::MakePCScanMetadata;
using ::partition_alloc::internal::MetadataAllocator;
using ::partition_alloc::internal::PCScanMetadataAllocator;
using ::partition_alloc::internal::PCScanMetadataDeleter;
using ::partition_alloc::internal::ReinitPCScanMetadataAllocatorForTesting;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_

View File

@ -247,12 +247,15 @@ PA_ALWAYS_INLINE void PCScan::MoveToQuarantine(void* object,
SecureMemset(object, 0, usable_size);
}
auto* state_bitmap = StateBitmapFromAddr(slot_start);
// TODO(bartekn): Remove MTE untagging, once it's done in the caller.
uintptr_t unmasked_slot_start =
::partition_alloc::internal::UnmaskPtr(slot_start);
auto* state_bitmap = StateBitmapFromAddr(unmasked_slot_start);
// Mark the state in the state bitmap as quarantined. Make sure to do it after
// the clearing to avoid racing with *Scan Sweeper.
[[maybe_unused]] const bool succeeded =
state_bitmap->Quarantine(slot_start, instance.epoch());
state_bitmap->Quarantine(unmasked_slot_start, instance.epoch());
#if PA_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED
if (PA_UNLIKELY(!succeeded))
DoubleFreeAttempt();
@ -278,4 +281,10 @@ inline PCScanScheduler& PCScan::scheduler() {
} // namespace internal
} // namespace partition_alloc
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::PCScan;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_

View File

@ -142,6 +142,7 @@ class QuarantineCardTable final {
// slots. May return false positives, but should never return false
// negatives, as otherwise this breaks security.
PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
address = ::partition_alloc::internal::UnmaskPtr(address);
const size_t byte = Byte(address);
PA_SCAN_DCHECK(byte < bytes_.size());
return bytes_[byte];
@ -652,7 +653,6 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
PA_SCAN_INLINE size_t
PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
// Check if |maybe_ptr| points somewhere to the heap.
// The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
if (!state_map)
return 0;
@ -722,7 +722,8 @@ void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
// ScanPartitions.
const size_t size = slot_span->GetUsableSize(root);
if (clear_type == PCScan::ClearType::kLazy) {
void* object = root->SlotStartToObject(slot_start);
void* object = ::partition_alloc::internal::RemaskPtr(
root->SlotStartToObject(slot_start));
memset(object, 0, size);
}
#if PA_STARSCAN_USE_CARD_TABLE
@ -773,10 +774,9 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
}
#endif // defined(PA_HAS_64_BITS_POINTERS)
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
// |maybe_ptr| may have an MTE tag, so remove it first.
quarantine_size_ +=
task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr) {
quarantine_size_ += task_.TryMarkSlotInNormalBuckets(
::partition_alloc::internal::UnmaskPtr(maybe_ptr));
}
const PCScanTask& task_;
@ -937,6 +937,8 @@ void UnmarkInCardTable(uintptr_t slot_start,
SlotSpanMetadata<ThreadSafe>* slot_span,
uintptr_t slot_start) {
void* object = root->SlotStartToObject(slot_start);
// TODO(bartekn): Move MTE masking into SlotStartToObject.
object = ::partition_alloc::internal::RemaskPtr(object);
root->FreeNoHooksImmediate(object, slot_span, slot_start);
UnmarkInCardTable(slot_start, slot_span);
return slot_span->bucket->slot_size;
@ -1001,7 +1003,8 @@ void UnmarkInCardTable(uintptr_t slot_start,
const auto bitmap_iterator = [&](uintptr_t slot_start) {
SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(
::partition_alloc::internal::RemaskPtr(slot_start));
if (current_slot_span != previous_slot_span) {
// We started scanning a new slot span. Flush the accumulated freelist to

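For context on the IsQuarantined hunk above: the quarantine card table trades precision for speed by keeping one byte per fixed-size card of the address space, which is why it may report false positives but never false negatives. A minimal sketch under assumed parameters (4 KiB cards, a 1 GiB cage); the real QuarantineCardTable differs in detail and sits behind PA_STARSCAN_USE_CARD_TABLE:

#include <array>
#include <cstddef>
#include <cstdint>

// Assumed parameters, for illustration only.
constexpr size_t kCardSize = 4096;             // one byte covers 4 KiB
constexpr size_t kCageSize = size_t{1} << 30;  // hypothetical 1 GiB cage

class QuarantineCardTableSketch {
 public:
  // Marks every card overlapping [begin, begin + size), size > 0.
  void Quarantine(uintptr_t begin, size_t size) {
    for (size_t card = Byte(begin); card <= Byte(begin + size - 1); ++card)
      bytes_[card] = 1;
  }
  // May return true for an address that merely shares a card with quarantined
  // memory (false positive), but never misses a quarantined address.
  bool IsQuarantined(uintptr_t address) const { return bytes_[Byte(address)]; }

 private:
  static size_t Byte(uintptr_t address) {
    return (address % kCageSize) / kCardSize;
  }
  std::array<uint8_t, kCageSize / kCardSize> bytes_{};
};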
View File

@ -20,8 +20,11 @@
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
#include "base/allocator/partition_allocator/starscan/write_protector.h"
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace partition_alloc::internal {
class StarScanSnapshot;
class PCScanTask;
// Internal PCScan singleton. The separation between frontend and backend is
@ -108,7 +111,7 @@ class PCScanInternal final {
private:
friend internal::base::NoDestructor<PCScanInternal>;
friend class StarScanSnapshot;
friend class partition_alloc::internal::StarScanSnapshot;
using StackTops = std::unordered_map<
internal::base::PlatformThreadId,
@ -146,4 +149,11 @@ class PCScanInternal final {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::PCScanInternal;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_

View File

@ -33,7 +33,8 @@ void PCScanSchedulingBackend::EnableScheduling() {
scheduling_enabled_.store(true, std::memory_order_relaxed);
// Check if *Scan needs to be run immediately.
if (NeedsToImmediatelyScan())
PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
::base::internal::PCScan::PerformScan(
::base::internal::PCScan::InvocationMode::kNonBlocking);
}
size_t PCScanSchedulingBackend::ScanStarted() {

View File

@ -198,4 +198,14 @@ bool PCScanScheduler::AccountFreed(size_t size) {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::LimitBackend;
using ::partition_alloc::internal::MUAwareTaskBasedBackend;
using ::partition_alloc::internal::PCScanScheduler;
using ::partition_alloc::internal::QuarantineData;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_

View File

@ -139,4 +139,9 @@ void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::RacefulWorklist;
}
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_

View File

@ -12,7 +12,6 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if defined(ARCH_CPU_X86_64)
@ -93,9 +92,7 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
#if defined(PA_HAS_64_BITS_POINTERS)
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::CageMask() & kPtrUntagMask;
const uintptr_t mask = Derived::CageMask();
const uintptr_t base = Derived::CageBase();
#endif
for (; begin < end; begin += sizeof(uintptr_t)) {
@ -129,10 +126,7 @@ __attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
// vmovdqa (_mm256_load_si256) is twice smaller (0.25) than that of vmovapd
// (_mm256_load_pd).
const __m256i vbase = _mm256_set1_epi64x(derived().CageBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m256i cage_mask =
_mm256_set1_epi64x(derived().CageMask() & kPtrUntagMask);
const __m256i cage_mask = _mm256_set1_epi64x(derived().CageMask());
static_assert(sizeof(__m256i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
@ -168,10 +162,7 @@ __attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const __m128i vbase = _mm_set1_epi64x(derived().CageBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m128i cage_mask =
_mm_set1_epi64x(derived().CageMask() & kPtrUntagMask);
const __m128i cage_mask = _mm_set1_epi64x(derived().CageMask());
static_assert(sizeof(__m128i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
@ -209,10 +200,7 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const uint64x2_t vbase = vdupq_n_u64(derived().CageBase());
// If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const uint64x2_t cage_mask =
vdupq_n_u64(derived().CageMask() & kPtrUntagMask);
const uint64x2_t cage_mask = vdupq_n_u64(derived().CageMask());
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
// Keep it MTE-untagged. See DisableMTEScope for details.
@ -236,4 +224,11 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::ScanLoop;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_
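The 105-side lines removed above fold the MTE untag mask into the cage mask so that a single AND per scanned word both strips the tag and tests cage membership. A scalar sketch of that trick, with made-up mask values standing in for Derived::CageMask() and Derived::CageBase():

#include <cstdint>

// Illustrative values only.
constexpr uintptr_t kCageMask     = 0xffffff8000000000ULL;
constexpr uintptr_t kCageBase     = 0x0000008000000000ULL;
constexpr uintptr_t kPtrUntagMask = 0x00ffffffffffffffULL;  // clears the MTE tag byte

// One AND with (kCageMask & kPtrUntagMask) removes the tag bits and keeps only
// the bits that identify the cage, so tagged and untagged pointers into the
// cage both compare equal to the base.
inline bool MaybePointsIntoCage(uintptr_t word) {
  return (word & (kCageMask & kPtrUntagMask)) == kCageBase;
}

The vectorized loops in the hunk perform exactly this comparison, just on two or four words at a time.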

View File

@ -91,4 +91,11 @@ StarScanSnapshot::UnprotectingView::UnprotectingView(StarScanSnapshot& snapshot)
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::StarScanSnapshot;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_

View File

@ -45,4 +45,14 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Stack final {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::GetStackPointer;
using ::partition_alloc::internal::GetStackTop;
using ::partition_alloc::internal::Stack;
using ::partition_alloc::internal::StackVisitor;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_

View File

@ -27,4 +27,12 @@ enum class SimdSupport : uint8_t {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::Context;
using ::partition_alloc::internal::SimdSupport;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_

View File

@ -245,4 +245,11 @@ inline StatsCollector::MetadataString StatsCollector::ToUMAString(
} // namespace internal
} // namespace partition_alloc
// TODO(crbug.com/1151236): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::StatsCollector;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_

View File

@ -5,22 +5,24 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h"
namespace partition_alloc {
static_assert(sizeof(uint32_t) >= sizeof(internal::base::PlatformThreadId),
"sizeof(tid) must be larger than sizeof(PlatformThreadId)");
// StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1,
// and UmaHistogramTimes. It is used to just remove trace_log and uma
// dependencies from partition allocator.
class StatsReporter {
public:
virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
internal::base::PlatformThreadId tid,
uint32_t tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) {}
virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
internal::base::PlatformThreadId tid,
uint32_t tid,
int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) {}

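StatsReporter above is a dependency-inversion seam: partition_alloc calls the virtual hooks and the embedder supplies the trace/UMA plumbing, keeping trace_log and UMA dependencies out of the allocator. A sketch of what an embedder-side override could look like, using stand-in types that only mirror the shape of the hunk (104-side signature with a plain uint32_t tid); the real StatsCollector id enums and reporting backend are not reproduced here:

#include <cstdint>
#include <cstdio>

// Stand-in for StatsCollector::ScannerId; values are illustrative only.
enum class ScannerIdSketch : uint8_t { kClear, kScan, kSweep, kOverall };

class StatsReporterSketch {
 public:
  virtual ~StatsReporterSketch() = default;
  // No-op by default, like the header above, so embedders that do not care
  // about *Scan stats pay nothing.
  virtual void ReportTraceEvent(ScannerIdSketch /*id*/,
                                uint32_t /*tid*/,
                                int64_t /*start_ticks*/,
                                int64_t /*end_ticks*/) {}
};

// An embedder override that forwards the raw numbers to its own backend.
class LoggingStatsReporter final : public StatsReporterSketch {
 public:
  void ReportTraceEvent(ScannerIdSketch id,
                        uint32_t tid,
                        int64_t start_ticks,
                        int64_t end_ticks) override {
    std::printf("scanner %d on tid %u: %lld -> %lld\n", static_cast<int>(id),
                static_cast<unsigned>(tid), static_cast<long long>(start_ticks),
                static_cast<long long>(end_ticks));
  }
};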
View File

@ -27,8 +27,9 @@
namespace partition_alloc::internal {
PCScan::ClearType NoWriteProtector::SupportedClearType() const {
return PCScan::ClearType::kLazy;
::base::internal::PCScan::ClearType NoWriteProtector::SupportedClearType()
const {
return ::base::internal::PCScan::ClearType::kLazy;
}
#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
@ -57,7 +58,7 @@ void UserFaultFDThread(int uffd) {
// Enter the safepoint. Concurrent faulted writes will wait until safepoint
// finishes.
PCScan::JoinScanIfNeeded();
::base::internal::PCScan::JoinScanIfNeeded();
}
}
} // namespace
@ -120,8 +121,10 @@ void UserFaultFDWriteProtector::UnprotectPages(uintptr_t begin, size_t length) {
UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect);
}
PCScan::ClearType UserFaultFDWriteProtector::SupportedClearType() const {
return IsSupported() ? PCScan::ClearType::kEager : PCScan::ClearType::kLazy;
::base::internal::PCScan::ClearType
UserFaultFDWriteProtector::SupportedClearType() const {
return IsSupported() ? ::base::internal::PCScan::ClearType::kEager
: ::base::internal::PCScan::ClearType::kLazy;
}
bool UserFaultFDWriteProtector::IsSupported() const {

View File

@ -28,14 +28,14 @@ class WriteProtector : public AllocatedOnPCScanMetadataPartition {
virtual bool IsEnabled() const = 0;
virtual PCScan::ClearType SupportedClearType() const = 0;
virtual ::base::internal::PCScan::ClearType SupportedClearType() const = 0;
};
class NoWriteProtector final : public WriteProtector {
public:
void ProtectPages(uintptr_t, size_t) final {}
void UnprotectPages(uintptr_t, size_t) final {}
PCScan::ClearType SupportedClearType() const final;
::base::internal::PCScan::ClearType SupportedClearType() const final;
inline bool IsEnabled() const override;
};
@ -55,7 +55,7 @@ class UserFaultFDWriteProtector final : public WriteProtector {
void ProtectPages(uintptr_t, size_t) final;
void UnprotectPages(uintptr_t, size_t) final;
PCScan::ClearType SupportedClearType() const final;
::base::internal::PCScan::ClearType SupportedClearType() const final;
inline bool IsEnabled() const override;
@ -73,4 +73,15 @@ bool UserFaultFDWriteProtector::IsEnabled() const {
} // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::NoWriteProtector;
using ::partition_alloc::internal::WriteProtector;
#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
using ::partition_alloc::internal::UserFaultFDWriteProtector;
#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_

View File

@ -39,11 +39,10 @@ namespace internal {
constexpr int kMemTagGranuleSize = 16u;
#if defined(PA_HAS_MEMORY_TAGGING)
constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
constexpr uint64_t kMemTagUnmask = 0x00ffffffffffffffuLL;
#else
constexpr uint64_t kPtrTagMask = 0;
constexpr uint64_t kMemTagUnmask = 0xffffffffffffffffuLL;
#endif // defined(PA_HAS_MEMORY_TAGGING)
constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
#if BUILDFLAG(IS_ANDROID)
// Changes the memory tagging mode for all threads in the current process.
@ -87,7 +86,6 @@ extern PA_COMPONENT_EXPORT(PARTITION_ALLOC)
// (e.g. free). Returns the pointer with the new tag. Ensures that the entire
// range is set to the same tag.
// TODO(bartekn): Remove the T* variant.
// TODO(bartekn): Consider removing the return value.
template <typename T>
PA_ALWAYS_INLINE T* TagMemoryRangeIncrement(T* ptr, size_t size) {
#if defined(PA_HAS_MEMORY_TAGGING)
@ -96,8 +94,9 @@ PA_ALWAYS_INLINE T* TagMemoryRangeIncrement(T* ptr, size_t size) {
return ptr;
#endif
}
PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(uintptr_t ptr, size_t size) {
return TagMemoryRangeIncrement(reinterpret_cast<void*>(ptr), size);
PA_ALWAYS_INLINE uintptr_t TagMemoryRangeIncrement(uintptr_t ptr, size_t size) {
return reinterpret_cast<uintptr_t>(
TagMemoryRangeIncrement(reinterpret_cast<void*>(ptr), size));
}
// Randomly changes the tag of the ptr memory range. Useful for initial random
@ -115,45 +114,46 @@ PA_ALWAYS_INLINE T* TagMemoryRangeRandomly(T* ptr,
return ptr;
#endif
}
PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t ptr,
PA_ALWAYS_INLINE uintptr_t TagMemoryRangeRandomly(uintptr_t ptr,
size_t size,
uint64_t mask = 0u) {
return TagMemoryRangeRandomly(reinterpret_cast<void*>(ptr), size, mask);
return reinterpret_cast<uintptr_t>(
TagMemoryRangeRandomly(reinterpret_cast<void*>(ptr), size, mask));
}
// Gets a version of ptr that's safe to dereference.
// TODO(bartekn): Remove the T* variant.
template <typename T>
PA_ALWAYS_INLINE T* TagPtr(T* ptr) {
PA_ALWAYS_INLINE T* RemaskPtr(T* ptr) {
#if defined(PA_HAS_MEMORY_TAGGING)
return reinterpret_cast<T*>(global_remask_void_ptr_fn(ptr));
#else
return ptr;
#endif
}
// Gets a version of |address| that's safe to dereference, and casts to a
// pointer.
PA_ALWAYS_INLINE void* TagAddr(uintptr_t address) {
return TagPtr(reinterpret_cast<void*>(address));
// Gets a version of address that's safe to dereference, if cast to a pointer.
PA_ALWAYS_INLINE uintptr_t RemaskPtr(uintptr_t address) {
return reinterpret_cast<uintptr_t>(
RemaskPtr(reinterpret_cast<void*>(address)));
}
// Strips the tag bits off |address|.
PA_ALWAYS_INLINE uintptr_t UntagAddr(uintptr_t address) {
// Strips the tag bits off address.
PA_ALWAYS_INLINE uintptr_t UnmaskPtr(uintptr_t address) {
#if defined(PA_HAS_MEMORY_TAGGING)
return address & internal::kPtrUntagMask;
return address & kMemTagUnmask;
#else
return address;
#endif
}
// Strips the tag bits off ptr.
// TODO(bartekn): Remove the T* variant.
template <typename T>
PA_ALWAYS_INLINE T* UnmaskPtr(T* ptr) {
return reinterpret_cast<T*>(UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
}
} // namespace internal
// Strips the tag bits off |ptr|.
template <typename T>
PA_ALWAYS_INLINE uintptr_t UntagPtr(T* ptr) {
return internal::UntagAddr(reinterpret_cast<uintptr_t>(ptr));
}
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_
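To make the mask constants in the hunk above concrete: with an 8-bit MTE tag in the top byte, untagging is a single AND with kMemTagUnmask (the 105 side keeps the complementary constant as kPtrUntagMask). A worked example with a made-up address:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kPtrTagMask   = 0xff00000000000000ULL;  // from the hunk above
  constexpr uint64_t kMemTagUnmask = 0x00ffffffffffffffULL;

  uint64_t tagged   = 0x0b00004012345670ULL;   // tag 0x0b in bits 56..63
  uint64_t untagged = tagged & kMemTagUnmask;  // 0x0000004012345670
  uint64_t tag_bits = tagged & kPtrTagMask;    // 0x0b00000000000000

  assert(untagged == 0x0000004012345670ULL);
  assert(tag_bits == 0x0b00000000000000ULL);
  return 0;
}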

View File

@ -10,7 +10,6 @@
#include <atomic>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -453,8 +452,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
internal::PartitionPageSize(), &usable_size,
&already_zeroed);
ThreadCache* tcache =
new (internal::SlotStartAddr2Ptr(buffer)) ThreadCache(root);
ThreadCache* tcache = new (reinterpret_cast<void*>(buffer)) ThreadCache(root);
// This may allocate.
internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
@ -521,16 +519,12 @@ void ThreadCache::Delete(void* tcache_ptr) {
auto* root = tcache->root_;
tcache->~ThreadCache();
// ThreadCache was allocated using RawAlloc() and SlotStartAddr2Ptr(), so it
// is shifted by extras, but is MTE-tagged.
root->RawFree(internal::SlotStartPtr2Addr(tcache_ptr));
root->RawFree(reinterpret_cast<uintptr_t>(tcache_ptr));
#if BUILDFLAG(IS_WIN)
// On Windows, allocations do occur during thread/process teardown, make sure
// they don't resurrect the thread cache.
//
// Don't MTE-tag, as it'd mess with the sentinel value.
//
// TODO(lizeb): Investigate whether this is needed on POSIX as well.
internal::PartitionTlsSet(internal::g_thread_cache_key,
reinterpret_cast<void*>(kTombstone));
@ -685,7 +679,7 @@ void ThreadCache::FreeAfter(internal::PartitionFreelistEntry* head,
// acquisitions can be expensive.
internal::ScopedGuard guard(root_->lock_);
while (head) {
uintptr_t slot_start = internal::SlotStartPtr2Addr(head);
uintptr_t slot_start = reinterpret_cast<uintptr_t>(head);
head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
root_->RawFreeLocked(slot_start);
}

View File

@ -10,7 +10,6 @@
#include <limits>
#include <memory>
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -248,19 +247,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
#if defined(PA_THREAD_CACHE_FAST_TLS)
return internal::g_thread_cache;
#else
// This region isn't MTE-tagged.
return reinterpret_cast<ThreadCache*>(
internal::PartitionTlsGet(internal::g_thread_cache_key));
#endif
}
static bool IsValid(ThreadCache* tcache) {
// Do not MTE-untag, as it'd mess up the sentinel value.
return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask;
}
static bool IsTombstone(ThreadCache* tcache) {
// Do not MTE-untag, as it'd mess up the sentinel value.
return reinterpret_cast<uintptr_t>(tcache) == kTombstone;
}
@ -531,14 +527,14 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
}
PA_DCHECK(bucket.count != 0);
internal::PartitionFreelistEntry* entry = bucket.freelist_head;
internal::PartitionFreelistEntry* result = bucket.freelist_head;
// Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that led to the crash, helping to
// narrow down the search for the culprit. |bucket| was touched just now, so this
// does not introduce another cache miss.
internal::PartitionFreelistEntry* next =
entry->GetNextForThreadCache<true>(bucket.slot_size);
PA_DCHECK(entry != next);
result->GetNextForThreadCache<true>(bucket.slot_size);
PA_DCHECK(result != next);
bucket.count--;
PA_DCHECK(bucket.count != 0 || !next);
bucket.freelist_head = next;
@ -546,7 +542,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_DCHECK(cached_memory_ >= bucket.slot_size);
cached_memory_ -= bucket.slot_size;
return internal::SlotStartPtr2Addr(entry);
return reinterpret_cast<uintptr_t>(result);
}
PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
@ -569,23 +565,19 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
static_assert(internal::kAlignment == 16, "");
#if PA_HAS_BUILTIN(__builtin_assume_aligned)
// Cast back to uintptr_t, because we need it for pointer arithmetic. Make
// sure it gets MTE-tagged, as we cast it later to a pointer and dereference.
uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(__builtin_assume_aligned(
internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment));
uintptr_t address = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(
reinterpret_cast<void*>(slot_start), internal::kAlignment));
#else
uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(internal::SlotStartAddr2Ptr(slot_start));
uintptr_t address = slot_start;
#endif
// The pointer is always 16 bytes aligned, so its start address is always == 0
// % 16. Its distance to the next cacheline is `64 - ((address_tagged & 63) /
// 16) * 16`.
// % 16. Its distance to the next cacheline is 64 - ((address & 63) / 16) *
// 16.
static_assert(
internal::kPartitionCachelineSize == 64,
"The computation below assumes that cache lines are 64 bytes long.");
int distance_to_next_cacheline_in_16_bytes = 4 - ((address_tagged >> 4) & 3);
int distance_to_next_cacheline_in_16_bytes = 4 - ((address >> 4) & 3);
int slot_size_remaining_in_16_bytes =
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// When BRP is on in the "previous slot" mode, this slot may have a BRP
@ -599,10 +591,10 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
slot_size_remaining_in_16_bytes = std::min(
slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes);
static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
0xbadbad00, 0xbadbad00};
// Already MTE-tagged above, so safe to dereference.
uint32_t* address_aligned = reinterpret_cast<uint32_t*>(address_tagged);
static const uint32_t poison_16_bytes[4] = {0xdeadbeef, 0xdeadbeef,
0xdeadbeef, 0xdeadbeef};
uint32_t* address_aligned = reinterpret_cast<uint32_t*>(address);
for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
// Clang will expand the memcpy to a 16-byte write (movups on x86).
memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
@ -619,4 +611,13 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
} // namespace partition_alloc
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::ThreadCache;
using ::partition_alloc::ThreadCacheRegistry;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
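The PutInBucket hunk above poisons freed slots only up to the next cache line, and the distance arithmetic it relies on is easy to check by hand. A small worked example, assuming 64-byte cache lines and a 16-byte-aligned slot start exactly as the code does:

#include <cassert>
#include <cstdint>

int main() {
  // Slot start at offset 48 within its cache line (0x1030 % 64 == 48), i.e.
  // three 16-byte chunks into the line.
  uintptr_t address = 0x1030;

  // Formula used by the code: distance in 16-byte units.
  int distance_in_16_bytes = static_cast<int>(4 - ((address >> 4) & 3));
  assert(distance_in_16_bytes == 1);  // only 16 bytes left before the next line

  // Equivalent byte form quoted in the comment.
  int distance_in_bytes = static_cast<int>(64 - ((address & 63) / 16) * 16);
  assert(distance_in_bytes == 16);
  return 0;
}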

View File

@ -17,7 +17,6 @@
#include "base/bits.h"
#include "base/check_op.h"
#include "base/numerics/safe_conversions.h"
namespace base {
namespace allocator {
@ -123,7 +122,7 @@ void* AlignAllocation(void* ptr, size_t alignment) {
// Write the prefix.
AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
prefix->original_allocation_offset =
checked_cast<unsigned int>(address - reinterpret_cast<uintptr_t>(ptr));
address - reinterpret_cast<uintptr_t>(ptr);
#if DCHECK_IS_ON()
prefix->magic = AlignedPrefix::kMagic;
#endif // DCHECK_IS_ON()
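The hunk above only changes how original_allocation_offset gets narrowed, but the surrounding prefix trick is worth spelling out: over-allocate, bump the result up to the requested alignment, and stash the bump distance in a small header just before the aligned block so the original pointer can be recovered on free. A simplified sketch (power-of-two alignment assumed, no magic/DCHECK bookkeeping):

#include <cstddef>
#include <cstdint>

struct AlignedPrefix {
  unsigned int original_allocation_offset;
};

// |ptr| comes from an allocator that over-allocated by at least
// alignment + sizeof(AlignedPrefix); |alignment| must be a power of two.
void* AlignAllocationSketch(void* ptr, size_t alignment) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t aligned =
      (raw + sizeof(AlignedPrefix) + alignment - 1) & ~(alignment - 1);
  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(aligned) - 1;
  prefix->original_allocation_offset = static_cast<unsigned int>(aligned - raw);
  return reinterpret_cast<void*>(aligned);
}

// Walk back to the pointer the underlying allocator originally returned.
void* UnalignAllocationSketch(void* aligned) {
  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(aligned) - 1;
  return reinterpret_cast<char*>(aligned) - prefix->original_allocation_offset;
}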

View File

@ -29,8 +29,7 @@ int OpenApkAsset(const std::string& file_path,
CHECK_EQ(3U, results.size());
int fd = static_cast<int>(results[0]);
region->offset = results[1];
// Not a checked_cast because open() may return -1.
region->size = static_cast<size_t>(results[2]);
region->size = results[2];
return fd;
}

View File

@ -21,11 +21,11 @@ namespace android {
namespace {
// We are leaking these strings.
const char* StrDupParam(const std::vector<std::string>& params, size_t index) {
const char* StrDupParam(const std::vector<std::string>& params, int index) {
return strdup(params[index].c_str());
}
int GetIntParam(const std::vector<std::string>& params, size_t index) {
int GetIntParam(const std::vector<std::string>& params, int index) {
int ret = 0;
bool success = StringToInt(params[index], &ret);
DCHECK(success);

View File

@ -27,7 +27,7 @@ void JNI_ChildProcessService_RegisterFileDescriptors(
const JavaParamRef<jlongArray>& j_sizes) {
std::vector<absl::optional<std::string>> keys;
JavaObjectArrayReader<jstring> keys_array(j_keys);
keys.reserve(checked_cast<size_t>(keys_array.size()));
keys.reserve(keys_array.size());
for (auto str : keys_array) {
absl::optional<std::string> key;
if (str) {
@ -54,7 +54,7 @@ void JNI_ChildProcessService_RegisterFileDescriptors(
base::MemoryMappedFile::Region region = {offsets.at(i),
static_cast<size_t>(sizes.at(i))};
const absl::optional<std::string>& key = keys.at(i);
const auto id = static_cast<GlobalDescriptors::Key>(ids.at(i));
int id = ids.at(i);
int fd = fds.at(i);
if (key) {
base::FileDescriptorStore::GetInstance().Set(*key, base::ScopedFD(fd),

View File

@ -15,7 +15,7 @@ jint JNI_CpuFeatures_GetCoreCount(JNIEnv*) {
}
jlong JNI_CpuFeatures_GetCpuFeatures(JNIEnv*) {
return static_cast<jlong>(android_getCpuFeatures());
return android_getCpuFeatures();
}
} // namespace android

View File

@ -106,8 +106,7 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncBeginEvent(
std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
internal::kJavaTraceCategory, name.c_str(), TRACE_ID_LOCAL(id),
TimeTicks::FromJavaNanoTime(time_ns),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
@ -120,8 +119,7 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncEndEvent(
std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
internal::kJavaTraceCategory, name.c_str(), TRACE_ID_LOCAL(id),
TimeTicks::FromJavaNanoTime(time_ns),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}

View File

@ -23,9 +23,7 @@ namespace {
void AddFrameToTrace(int64_t timestamp_ns, int64_t durations_ns) {
#if BUILDFLAG(ENABLE_BASE_TRACING)
if (timestamp_ns < 0)
return;
auto t = perfetto::Track(static_cast<uint64_t>(timestamp_ns));
auto t = perfetto::Track(timestamp_ns);
TRACE_EVENT_BEGIN(
"ui", "AndroidFrameVsync", t, [&](perfetto::EventContext ctx) {
ctx.event()->set_timestamp_absolute_us(timestamp_ns / 1000);
@ -77,7 +75,7 @@ void RecordJankMetrics(
std::string missed_frames_histogram_name =
base::StrCat({"Android.Jank.MissedFrames.", scenario_name});
for (size_t i = 0; i < timestamps_ns.size(); ++i) {
for (unsigned i = 0; i < timestamps_ns.size(); ++i) {
AddFrameToTrace(timestamps_ns[i], durations_ns[i]);
}

View File

@ -26,8 +26,7 @@ void (*g_java_exception_callback)(const char*);
using JavaExceptionFilter =
base::RepeatingCallback<bool(const JavaRef<jthrowable>&)>;
LazyInstance<JavaExceptionFilter>::Leaky g_java_exception_filter =
LAZY_INSTANCE_INITIALIZER;
LazyInstance<JavaExceptionFilter>::Leaky g_java_exception_filter;
} // namespace

View File

@ -26,13 +26,13 @@ namespace base {
namespace android {
JavaHandlerThread::JavaHandlerThread(const char* name,
base::ThreadType thread_type)
base::ThreadPriority priority)
: JavaHandlerThread(
name,
Java_JavaHandlerThread_create(
AttachCurrentThread(),
ConvertUTF8ToJavaString(AttachCurrentThread(), name),
base::internal::ThreadTypeToNiceValue(thread_type))) {}
base::internal::ThreadPriorityToNiceValue(priority))) {}
JavaHandlerThread::JavaHandlerThread(
const char* name,

View File

@ -33,7 +33,7 @@ class BASE_EXPORT JavaHandlerThread {
// Create new thread.
explicit JavaHandlerThread(
const char* name,
base::ThreadType thread_type = base::ThreadType::kDefault);
base::ThreadPriority priority = base::ThreadPriority::NORMAL);
// Wrap and connect to an existing JavaHandlerThread.
// |obj| is an instance of JavaHandlerThread.
explicit JavaHandlerThread(

View File

@ -5,20 +5,16 @@
#include "base/android/java_runtime.h"
#include "base/android_runtime_jni_headers/Runtime_jni.h"
#include "base/numerics/safe_conversions.h"
namespace base {
namespace android {
void JavaRuntime::GetMemoryUsage(uint64_t* total_memory,
uint64_t* free_memory) {
void JavaRuntime::GetMemoryUsage(long* total_memory, long* free_memory) {
JNIEnv* env = base::android::AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobject> runtime =
JNI_Runtime::Java_Runtime_getRuntime(env);
*total_memory = checked_cast<uint64_t>(
JNI_Runtime::Java_Runtime_totalMemory(env, runtime));
*free_memory = checked_cast<uint64_t>(
JNI_Runtime::Java_Runtime_freeMemory(env, runtime));
*total_memory = JNI_Runtime::Java_Runtime_totalMemory(env, runtime);
*free_memory = JNI_Runtime::Java_Runtime_freeMemory(env, runtime);
}
} // namespace android

View File

@ -16,7 +16,7 @@ class BASE_EXPORT JavaRuntime {
public:
// Fills the total memory used and memory allocated for objects by the java
// heap in the current process. Returns true on success.
static void GetMemoryUsage(uint64_t* total_memory, uint64_t* free_memory);
static void GetMemoryUsage(long* total_memory, long* free_memory);
};
} // namespace android

View File

@ -9,7 +9,6 @@
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/check_op.h"
#include "base/numerics/safe_conversions.h"
namespace base {
namespace android {
@ -31,12 +30,11 @@ size_t SafeGetArrayLength(JNIEnv* env, const JavaRef<JavaArrayType>& jarray) {
ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
const uint8_t* bytes,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jbyteArray byte_array = env->NewByteArray(len_jsize);
jbyteArray byte_array = env->NewByteArray(len);
CheckException(env);
DCHECK(byte_array);
env->SetByteArrayRegion(byte_array, 0, len_jsize,
env->SetByteArrayRegion(byte_array, 0, len,
reinterpret_cast<const jbyte*>(bytes));
CheckException(env);
@ -58,12 +56,11 @@ ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
const bool* bools,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jbooleanArray boolean_array = env->NewBooleanArray(len_jsize);
jbooleanArray boolean_array = env->NewBooleanArray(len);
CheckException(env);
DCHECK(boolean_array);
env->SetBooleanArrayRegion(boolean_array, 0, len_jsize,
env->SetBooleanArrayRegion(boolean_array, 0, len,
reinterpret_cast<const jboolean*>(bools));
CheckException(env);
@ -73,12 +70,11 @@ ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
const int* ints,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jintArray int_array = env->NewIntArray(len_jsize);
jintArray int_array = env->NewIntArray(len);
CheckException(env);
DCHECK(int_array);
env->SetIntArrayRegion(int_array, 0, len_jsize,
env->SetIntArrayRegion(int_array, 0, len,
reinterpret_cast<const jint*>(ints));
CheckException(env);
@ -93,12 +89,11 @@ ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
ScopedJavaLocalRef<jlongArray> ToJavaLongArray(JNIEnv* env,
const int64_t* longs,
size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jlongArray long_array = env->NewLongArray(len_jsize);
jlongArray long_array = env->NewLongArray(len);
CheckException(env);
DCHECK(long_array);
env->SetLongArrayRegion(long_array, 0, len_jsize,
env->SetLongArrayRegion(long_array, 0, len,
reinterpret_cast<const jlong*>(longs));
CheckException(env);
@ -115,12 +110,11 @@ BASE_EXPORT ScopedJavaLocalRef<jlongArray> ToJavaLongArray(
// Returns a new Java float array converted from the given C++ float array.
BASE_EXPORT ScopedJavaLocalRef<jfloatArray>
ToJavaFloatArray(JNIEnv* env, const float* floats, size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jfloatArray float_array = env->NewFloatArray(len_jsize);
jfloatArray float_array = env->NewFloatArray(len);
CheckException(env);
DCHECK(float_array);
env->SetFloatArrayRegion(float_array, 0, len_jsize,
env->SetFloatArrayRegion(float_array, 0, len,
reinterpret_cast<const jfloat*>(floats));
CheckException(env);
@ -135,12 +129,11 @@ BASE_EXPORT ScopedJavaLocalRef<jfloatArray> ToJavaFloatArray(
BASE_EXPORT ScopedJavaLocalRef<jdoubleArray>
ToJavaDoubleArray(JNIEnv* env, const double* doubles, size_t len) {
const jsize len_jsize = checked_cast<jsize>(len);
jdoubleArray double_array = env->NewDoubleArray(len_jsize);
jdoubleArray double_array = env->NewDoubleArray(len);
CheckException(env);
DCHECK(double_array);
env->SetDoubleArrayRegion(double_array, 0, len_jsize,
env->SetDoubleArrayRegion(double_array, 0, len,
reinterpret_cast<const jdouble*>(doubles));
CheckException(env);
@ -157,12 +150,11 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
JNIEnv* env,
ScopedJavaLocalRef<jclass> clazz,
base::span<const ScopedJavaLocalRef<jobject>> v) {
jobjectArray joa =
env->NewObjectArray(checked_cast<jsize>(v.size()), clazz.obj(), nullptr);
jobjectArray joa = env->NewObjectArray(v.size(), clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, i, v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -178,12 +170,12 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
base::span<const ScopedJavaGlobalRef<jobject>> v) {
ScopedJavaLocalRef<jclass> object_array_clazz =
GetClass(env, "java/lang/Object");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
object_array_clazz.obj(), nullptr);
jobjectArray joa =
env->NewObjectArray(v.size(), object_array_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, i, v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -192,12 +184,11 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaLocalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type) {
jobjectArray joa =
env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
jobjectArray joa = env->NewObjectArray(v.size(), type.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, i, v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -206,12 +197,11 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaGlobalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type) {
jobjectArray joa =
env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
jobjectArray joa = env->NewObjectArray(v.size(), type.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
env->SetObjectArrayElement(joa, i, v[i].obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -220,14 +210,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
JNIEnv* env,
base::span<const std::string> v) {
ScopedJavaLocalRef<jclass> byte_array_clazz = GetClass(env, "[B");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
byte_array_clazz.obj(), nullptr);
jobjectArray joa =
env->NewObjectArray(v.size(), byte_array_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jbyteArray> byte_array = ToJavaByteArray(
env, reinterpret_cast<const uint8_t*>(v[i].data()), v[i].length());
env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
env->SetObjectArrayElement(joa, i, byte_array.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -236,14 +226,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
JNIEnv* env,
base::span<const std::vector<uint8_t>> v) {
ScopedJavaLocalRef<jclass> byte_array_clazz = GetClass(env, "[B");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
byte_array_clazz.obj(), nullptr);
jobjectArray joa =
env->NewObjectArray(v.size(), byte_array_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jbyteArray> byte_array =
ToJavaByteArray(env, v[i].data(), v[i].size());
env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
env->SetObjectArrayElement(joa, i, byte_array.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -252,13 +242,12 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
JNIEnv* env,
base::span<const std::string> v) {
ScopedJavaLocalRef<jclass> string_clazz = GetClass(env, "java/lang/String");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
string_clazz.obj(), nullptr);
jobjectArray joa = env->NewObjectArray(v.size(), string_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jstring> item = ConvertUTF8ToJavaString(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
env->SetObjectArrayElement(joa, i, item.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -269,14 +258,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
ScopedJavaLocalRef<jclass> string_array_clazz =
GetClass(env, "[Ljava/lang/String;");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(vec_outer.size()),
string_array_clazz.obj(), nullptr);
jobjectArray joa =
env->NewObjectArray(vec_outer.size(), string_array_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < vec_outer.size(); ++i) {
ScopedJavaLocalRef<jobjectArray> inner =
ToJavaArrayOfStrings(env, vec_outer[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
env->SetObjectArrayElement(joa, i, inner.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
@ -288,14 +277,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
ScopedJavaLocalRef<jclass> string_array_clazz =
GetClass(env, "[Ljava/lang/String;");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(vec_outer.size()),
string_array_clazz.obj(), nullptr);
jobjectArray joa =
env->NewObjectArray(vec_outer.size(), string_array_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < vec_outer.size(); ++i) {
ScopedJavaLocalRef<jobjectArray> inner =
ToJavaArrayOfStrings(env, vec_outer[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
env->SetObjectArrayElement(joa, i, inner.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
@ -305,13 +294,12 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
JNIEnv* env,
base::span<const std::u16string> v) {
ScopedJavaLocalRef<jclass> string_clazz = GetClass(env, "java/lang/String");
jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
string_clazz.obj(), nullptr);
jobjectArray joa = env->NewObjectArray(v.size(), string_clazz.obj(), nullptr);
CheckException(env);
for (size_t i = 0; i < v.size(); ++i) {
ScopedJavaLocalRef<jstring> item = ConvertUTF16ToJavaString(env, v[i]);
env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
env->SetObjectArrayElement(joa, i, item.obj());
}
return ScopedJavaLocalRef<jobjectArray>(env, joa);
}
@ -327,8 +315,7 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
out->resize(back + len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jstring> str(
env, static_cast<jstring>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env, static_cast<jstring>(env->GetObjectArrayElement(array.obj(), i)));
ConvertJavaStringToUTF16(env, str.obj(), out->data() + back + i);
}
}
@ -344,8 +331,7 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
out->resize(back + len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jstring> str(
env, static_cast<jstring>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env, static_cast<jstring>(env->GetObjectArrayElement(array.obj(), i)));
ConvertJavaStringToUTF8(env, str.obj(), out->data() + back + i);
}
}
@ -361,7 +347,7 @@ void AppendJavaByteArrayToByteVector(JNIEnv* env,
return;
size_t back = out->size();
out->resize(back + len);
env->GetByteArrayRegion(byte_array.obj(), 0, static_cast<jsize>(len),
env->GetByteArrayRegion(byte_array.obj(), 0, len,
reinterpret_cast<int8_t*>(out->data() + back));
}
@ -411,8 +397,7 @@ void JavaIntArrayToIntVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetIntArrayRegion(int_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetIntArrayRegion(int_array.obj(), 0, len, out->data());
}
void JavaLongArrayToInt64Vector(JNIEnv* env,
@ -433,8 +418,7 @@ void JavaLongArrayToLongVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetLongArrayRegion(long_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetLongArrayRegion(long_array.obj(), 0, len, out->data());
}
void JavaFloatArrayToFloatVector(JNIEnv* env,
@ -445,8 +429,7 @@ void JavaFloatArrayToFloatVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetFloatArrayRegion(float_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetFloatArrayRegion(float_array.obj(), 0, len, out->data());
}
void JavaDoubleArrayToDoubleVector(JNIEnv* env,
@ -457,8 +440,7 @@ void JavaDoubleArrayToDoubleVector(JNIEnv* env,
out->resize(len);
if (!len)
return;
env->GetDoubleArrayRegion(double_array.obj(), 0, static_cast<jsize>(len),
out->data());
env->GetDoubleArrayRegion(double_array.obj(), 0, len, out->data());
}
void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
@ -469,9 +451,9 @@ void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
out->resize(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jbyteArray> bytes_array(
env, static_cast<jbyteArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
size_t bytes_len = SafeGetArrayLength(env, bytes_array);
env,
static_cast<jbyteArray>(env->GetObjectArrayElement(array.obj(), i)));
jsize bytes_len = env->GetArrayLength(bytes_array.obj());
jbyte* bytes = env->GetByteArrayElements(bytes_array.obj(), nullptr);
(*out)[i].assign(reinterpret_cast<const char*>(bytes), bytes_len);
env->ReleaseByteArrayElements(bytes_array.obj(), bytes, JNI_ABORT);
@ -486,8 +468,8 @@ void JavaArrayOfByteArrayToBytesVector(JNIEnv* env,
out->resize(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jbyteArray> bytes_array(
env, static_cast<jbyteArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env,
static_cast<jbyteArray>(env->GetObjectArrayElement(array.obj(), i)));
JavaByteArrayToByteVector(env, bytes_array, &(*out)[i]);
}
}
@ -501,8 +483,8 @@ void Java2dStringArrayTo2dStringVector(
out->resize(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jobjectArray> strings_array(
env, static_cast<jobjectArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env,
static_cast<jobjectArray>(env->GetObjectArrayElement(array.obj(), i)));
out->at(i).clear();
AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
@ -518,8 +500,8 @@ void Java2dStringArrayTo2dStringVector(
out->resize(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jobjectArray> strings_array(
env, static_cast<jobjectArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env,
static_cast<jobjectArray>(env->GetObjectArrayElement(array.obj(), i)));
out->at(i).clear();
AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
@ -534,8 +516,8 @@ void JavaArrayOfIntArrayToIntVector(JNIEnv* env,
out->resize(len);
for (size_t i = 0; i < len; ++i) {
ScopedJavaLocalRef<jintArray> int_array(
env, static_cast<jintArray>(env->GetObjectArrayElement(
array.obj(), static_cast<jsize>(i))));
env,
static_cast<jintArray>(env->GetObjectArrayElement(array.obj(), i)));
JavaIntArrayToIntVector(env, int_array, &out->at(i));
}
}
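The jni_array.cc hunks above all revolve around one point: JNI lengths and indices are jsize (a signed 32-bit int), while C++ containers report size_t, so the 105 side routes every length through base::checked_cast, which CHECK-fails instead of silently truncating. A standalone sketch of that narrowing check, hand-rolled here purely for illustration rather than pulling in base/numerics:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

using jsize_sketch = int32_t;  // stand-in for JNI's jsize

// Mimics the spirit of base::checked_cast<jsize>(len): crash loudly on values
// that do not fit rather than letting an implicit conversion wrap around.
inline jsize_sketch CheckedToJsize(size_t len) {
  assert(len <=
         static_cast<size_t>(std::numeric_limits<jsize_sketch>::max()));
  return static_cast<jsize_sketch>(len);
}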

View File

@ -99,5 +99,6 @@ java_annotation_processor("jni_processor") {
"//third_party/android_deps:com_google_auto_service_auto_service_annotations_java",
"//third_party/android_deps:com_google_guava_guava_java",
"//third_party/android_deps:com_squareup_javapoet_java",
"//third_party/android_deps:javax_annotation_jsr250_api_java",
]
}

View File

@ -20,17 +20,15 @@ def CommonChecks(input_api, output_api):
'PYTHONDONTWRITEBYTECODE': '1',
})
return input_api.RunTests(
input_api.canned_checks.GetUnitTests(
return input_api.canned_checks.RunUnitTests(
input_api,
output_api,
run_on_python2=False,
unit_tests=[
input_api.os_path.join(base_android_jni_generator_dir,
'jni_generator_tests.py')
],
env=env,
))
)
def CheckChangeOnUpload(input_api, output_api):

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@ -11,6 +11,8 @@ code generator and ensures the output matches a golden
file.
"""
from __future__ import print_function
import collections
import difflib
import inspect

View File

@ -6,7 +6,6 @@
#include "base/android/jni_android.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/utf_string_conversions.h"
namespace {
@ -14,8 +13,8 @@ namespace {
// Internal version that does not use a scoped local pointer.
jstring ConvertUTF16ToJavaStringImpl(JNIEnv* env,
const base::StringPiece16& str) {
jstring result = env->NewString(reinterpret_cast<const jchar*>(str.data()),
base::checked_cast<jsize>(str.length()));
jstring result =
env->NewString(reinterpret_cast<const jchar*>(str.data()), str.length());
base::android::CheckException(env);
return result;
}
@ -33,7 +32,7 @@ void ConvertJavaStringToUTF8(JNIEnv* env, jstring str, std::string* result) {
return;
}
const jsize length = env->GetStringLength(str);
if (length <= 0) {
if (!length) {
result->clear();
CheckException(env);
return;
@ -43,8 +42,7 @@ void ConvertJavaStringToUTF8(JNIEnv* env, jstring str, std::string* result) {
// function that yields plain (non Java-modified) UTF8.
const jchar* chars = env->GetStringChars(str, NULL);
DCHECK(chars);
UTF16ToUTF8(reinterpret_cast<const char16_t*>(chars),
static_cast<size_t>(length), result);
UTF16ToUTF8(reinterpret_cast<const char16_t*>(chars), length, result);
env->ReleaseStringChars(str, chars);
CheckException(env);
}
@ -86,7 +84,7 @@ void ConvertJavaStringToUTF16(JNIEnv* env,
return;
}
const jsize length = env->GetStringLength(str);
if (length <= 0) {
if (!length) {
result->clear();
CheckException(env);
return;
@ -95,8 +93,7 @@ void ConvertJavaStringToUTF16(JNIEnv* env,
DCHECK(chars);
// GetStringChars isn't required to NULL-terminate the strings
// it returns, so the length must be explicitly checked.
result->assign(reinterpret_cast<const char16_t*>(chars),
static_cast<size_t>(length));
result->assign(reinterpret_cast<const char16_t*>(chars), length);
env->ReleaseStringChars(str, chars);
CheckException(env);
}

View File

@ -0,0 +1,30 @@
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
results = []
# Dictionary of base/android files with corresponding Robolectric shadows.
# If new functions are added to the original file, it is very likely that
# a function with the same signature should be added to the shadow.
impl_to_shadow_paths = {
'base/android/java/src/org/'
'chromium/base/metrics/RecordHistogram.java':
'base/android/junit/src/org/'
'chromium/base/metrics/test/ShadowRecordHistogram.java'
}
for impl_path, shadow_path in impl_to_shadow_paths.items():
if impl_path in input_api.change.LocalPaths():
if shadow_path not in input_api.change.LocalPaths():
results.append(
output_api.PresubmitPromptWarning(
'You modified the runtime class:\n {}\n'
'without changing the corresponding shadow test class'
':\n {}\n'.format(impl_path, shadow_path)))
return results

Some files were not shown because too many files have changed in this diff.