Compare commits


No commits in common. "97915a596d1e05b691a7d535ee0d36215ef2b064" and "545c85631134e23e79bfad22721dc310d0592191" have entirely different histories.

2839 changed files with 135023 additions and 89799 deletions

@@ -1 +1 @@
-104.0.5112.79
+105.0.5195.52

@@ -22,6 +22,7 @@ Abhinav Vij <abhinav.vij@samsung.com>
 Abhishek Agarwal <abhishek.a21@samsung.com>
 Abhishek Kanike <abhishek.ka@samsung.com>
 Abhishek Singh <abhi.rathore@samsung.com>
+Abin K Paul <abin.paul1@gmail.com>
 Abul Hasan Md Osama <ahm.osama@samsung.com>
 Adam Bonner <abonner-chromium@solscope.com>
 Adam Bujalski <abujalski@gmail.com>
@@ -57,6 +58,7 @@ Alex Gaynor <alex.gaynor@gmail.com>
 Alex Henrie <alexhenrie24@gmail.com>
 Alex Scheele <alexscheele@gmail.com>
 Alexander Douglas <agdoug@amazon.com>
+Alexander Forrence <alex.forrence@gmail.com>
 Alexander Guettler <alexander@guettler.io>
 Alexander Rezepkin <etu@vivaldi.net>
 Alexander Shalamov <alexander.shalamov@intel.com>
@@ -181,6 +183,7 @@ Brian Yip <itsbriany@gmail.com>
 Brook Hong <hzgmaxwell@gmail.com>
 Bruno Calvignac <bruno@flock.com>
 Bruno de Oliveira Abinader <brunoabinader@gmail.com>
+Bruno Pitrus <brunopitrus@hotmail.com>
 Bruno Roy <brusi_roy@hotmail.com>
 Bryan Donlan <bdonlan@gmail.com>
 Bryce Thomas <bryct@amazon.com>
@@ -382,6 +385,7 @@ Gajendra Singh <wxjg68@motorola.com>
 Ganesh Borle <ganesh.borle@samsung.com>
 Gao Chun <chun.gao@intel.com>
 Gao Chun <gaochun.dev@gmail.com>
+Gao Yu <wanggao@tencent.com>
 Gaurav Dhol <gaurav.dhol@einfochips.com>
 Gautham Banasandra <gautham.bangalore@gmail.com>
 George Adams <geoada@amazon.com>
@@ -471,6 +475,7 @@ Irmak Kavasoglu <irmakkavasoglu@gmail.com>
 Isaac Murchie <murchieisaac@gmail.com>
 Isaac Reilly <reillyi@amazon.com>
 Ivan Naydonov <samogot@gmail.com>
+Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
 Ivan Sham <ivansham@amazon.com>
 Jack Bates <jack@nottheoilrig.com>
 Jacky Hu <flameddd@gmail.com>
@@ -512,6 +517,7 @@ Jay Soffian <jaysoffian@gmail.com>
 Jeado Ko <haibane84@gmail.com>
 Jeffrey C <jeffreyca16@gmail.com>
 Jeffrey Yeung <jeffrey.yeung@poly.com>
+Jelle Bleyaert <jellebley@gmail.com>
 Jeong A Shin <jeonga@khu.ac.kr>
 Jeongeun Kim <je_julie.kim@samsung.com>
 Jeongmin Kim <kimwjdalsl@gmail.com>
@@ -700,6 +706,7 @@ Lenny Khazan <lenny.khazan@gmail.com>
 Leo Wolf <jclw@ymail.com>
 Leon Han <leon.han@intel.com>
 Leung Wing Chung <lwchkg@gmail.com>
+Li Yanbo <liyanbo.monster@bytedance.com>
 Li Yin <li.yin@intel.com>
 Lidwine Genevet <lgenevet@cisco.com>
 Lin Sun <lin.sun@intel.com>
@@ -907,6 +914,7 @@ Pedro Tôrres <t0rr3s.p3dr0@gmail.com>
 Peng Hu <penghu@tencent.com>
 Peng Jiang <leiyi.jp@gmail.com>
 Peng Xinchao <pxinchao@gmail.com>
+Peng Zhou <zhoupeng.1996@bytedance.com>
 Peng-Yu Chen <pengyu@libstarrify.so>
 Pei Wang <wangpei@uniontech.com>
 Peter Bright <drpizza@quiscalusmexicanus.org>
@@ -938,6 +946,7 @@ Praveen Akkiraju <praveen.anp@samsung.com>
 Preeti Nayak <preeti.nayak@samsung.com>
 Pritam Nikam <pritam.nikam@samsung.com>
 Puttaraju R <puttaraju.r@samsung.com>
+Qi Tiezheng <qitiezheng@360.cn>
 Qi Yang <qi1988.yang@samsung.com>
 Qiang Zeng <zengqiang1@huawei.com>
 Qiankun Miao <qiankun.miao@intel.com>
@@ -1140,10 +1149,12 @@ Sungmann Cho <sungmann.cho@navercorp.com>
 Sunil Ratnu <sunil.ratnu@samsung.com>
 Sunitha Srivatsa <srivats@amazon.com>
 Sunwoo Nam <jegalzz88@gmail.com>
+Suresh Guttula <suresh.guttula@amd.corp-partner.google.com>
 Surya K M <suryagowda590@gmail.com>
 Sushma Venkatesh Reddy <sushma.venkatesh.reddy@intel.com>
 Suvanjan Mukherjee <suvanjanmukherjee@gmail.com>
 Suyambulingam R M <suyambu.rm@samsung.com>
+Suyash Nayan <suyashnyn1@gmail.com>
 Suyash Sengar <suyash.s@samsung.com>
 Swarali Raut <swarali.sr@samsung.com>
 Swati Jaiswal <swa.jaiswal@samsung.com>
@@ -1159,6 +1170,7 @@ Taehoon Lee <taylor.hoon@gmail.com>
 Taeseong Yu <yugeeklab@gmail.com>
 Taeyeon Kim <ssg9732@gmail.com>
 Tae Shin <taeshindev@gmail.com>
+Taher Ali <taher.dasten@gmail.com>
 Takaaki Suzuki <takaakisuzuki.14@gmail.com>
 Takahiro Aoyagi <hogehoge@gachapin.jp>
 Takashi Fujita <tgfjt.mail@gmail.com>
@@ -1329,6 +1341,7 @@ Zheng Xu <zxu@kobo.com>
 Zhengkun Li <zhengkli@amazon.com>
 Zhenyu Liang <zhenyu.liang@intel.com>
 Zhenyu Shan <zhenyu.shan@intel.com>
+Zhibo Wang <zhibo1.wang@intel.com>
 Zhifei Fang <facetothefate@gmail.com>
 Zhiyuan Ye <zhiyuanye@tencent.com>
 Zhuoyu Qian <zhuoyu.qian@samsung.com>
@@ -1383,6 +1396,7 @@ Loongson Technology Corporation Limited. <*@loongson.cn>
 Macadamian <*@macadamian.com>
 Mail.ru Group <*@corp.mail.ru>
 Make Positive Provar Limited <*@provartesting.com>
+Mc Zeng <zengmcong@gmail.com>
 Mediatek <*@mediatek.com>
 Microsoft <*@microsoft.com>
 MIPS Technologies, Inc. <*@mips.com>

src/DEPS
@@ -144,6 +144,19 @@ vars = {
   # tools/clang/OWNERS before depending on it.
   'checkout_clang_libs': 'use_rust',
 
+  # Fetch prebuilt and prepackaged Bazel binary/executable. Bazel is currently
+  # only needed by `chromium/src/tools/rust/build_crubit.py` and therefore
+  # shouldn't be used outside of Chromium Rust Experiments project.
+  # Furthermore note that Bazel is only needed when building Crubit during Rust
+  # toolchain build (and is *not* needed during regular Chromium builds).
+  'checkout_bazel': False,
+
+  # Fetch Crubit support libraries in order to build ..._rs_api.rs and
+  # ..._rs_api_impl.cc that are generated by prebuilt (see
+  # tools/rust/build_crubit.py) Crubit tools during Chromium build (see
+  # also //build/rust/rs_bindings_from_cc.gni).
+  'checkout_crubit': 'use_rust',
+
   # By default checkout the OpenXR loader library only on Windows. The OpenXR
   # backend for VR in Chromium is currently only supported for Windows, but
   # support for other platforms may be added in the future.
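Note: `checkout_bazel` and `checkout_crubit` are ordinary gclient vars, so a developer can flip them per checkout instead of editing DEPS. A minimal sketch of a `.gclient` file that opts in — the layout is the stock gclient template; only the `custom_vars` names come from the hunk above:

solutions = [
  {
    # Standard Chromium solution, as created by `fetch chromium`.
    "name": "src",
    "url": "https://chromium.googlesource.com/chromium/src.git",
    "managed": False,
    "custom_deps": {},
    # Override the defaults added in this hunk ('checkout_bazel' defaults
    # to False; 'checkout_crubit' defaults to the value of 'use_rust').
    "custom_vars": {
      "checkout_bazel": True,    # fetch the prepackaged Bazel via CIPD
      "checkout_crubit": True,   # fetch the Crubit sources
    },
  },
]

Running `gclient sync` afterwards re-evaluates every `condition` in DEPS against the overridden vars and fetches the newly enabled dependencies.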
@@ -184,6 +197,12 @@ vars = {
   # qemu on linux-arm64 machines.
   'checkout_fuchsia_for_arm64_host': False,
 
+  # Revision of Crubit (trunk on 2022-07-14). This should typically be the
+  # same as the revision specified in CRUBIT_REVISION in
+  # tools/rust/update_rust.py. More details and roll instructions can be
+  # found in tools/rust/README.md.
+  'crubit_revision': 'd9b0ad4c09b46328dcc7a5ec28ce86cca56e0389',
+
   # Run 'vpython_common' hook if this is set.
   # TODO(crbug.com/1329052): remove this when we remove .vpython.
   'enable_vpython_common_crbug_1329052': True,
@@ -214,7 +233,7 @@ vars = {
   #
   # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
   # CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
-  'lacros_sdk_version': '14844.0.0',
+  'lacros_sdk_version': '14977.0.0',
 
   # Generate location tag metadata to include in tests result data uploaded
   # to ResultDB. This isn't needed on some configs and the tool that generates
@@ -226,7 +245,7 @@ vars = {
   # luci-go CIPD package version.
   # Make sure the revision is uploaded by infra-packagers builder.
   # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
-  'luci_go': 'git_revision:de014227dd270df7c61bfab740eb4ae4b52ac2a7',
+  'luci_go': 'git_revision:a0ba80649473055bae3d789eec28c9967adb5e45',
 
   # This can be overridden, e.g. with custom_vars, to build clang from HEAD
   # instead of downloading the prebuilt pinned revision.
@@ -250,6 +269,9 @@ vars = {
   # for info on RTS
   'checkout_rts_model': False,
 
+  # Use the experimental version of the RTS model
+  'checkout_rts_experimental_model': False,
+
   # By default, do not check out the re-client binaries.
   'checkout_reclient': False,
@@ -257,7 +279,7 @@ vars = {
   'dawn_standalone': False,
 
   # reclient CIPD package version
-  'reclient_version': 're_client_version:0.68.0.2c94334-gomaip',
+  'reclient_version': 're_client_version:0.72.0.b874055-gomaip',
 
   # Enable fetching Rust-related packages.
   'use_rust': False,
@@ -275,34 +297,34 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Skia
   # and whatever else without interference from each other.
-  'skia_revision': '5a4dbb2e97302f7e574f0ba962ac4d8b3a62a36e',
+  'skia_revision': 'f204b137b97b44b7397de173fc54181c37ac6501',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling V8
   # and whatever else without interference from each other.
-  'v8_revision': '3d67ad243ce92b9fb162cc85da1dc1a0ebe4c78b',
+  'v8_revision': 'b1f56b4a8a7cf9f707f7966104278777f9994b13',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling ANGLE
   # and whatever else without interference from each other.
-  'angle_revision': '2693b03eba82a424a19febaacaab4115a45b7682',
+  'angle_revision': '2f0d8ab049b10ee41f9b90cea8da8e80db076e38',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling SwiftShader
   # and whatever else without interference from each other.
-  'swiftshader_revision': 'd68d367047f562e0e0716fd72b9a2bc3e2c8f4c0',
+  'swiftshader_revision': '16e026a959f1bc80ff237aa81b4a63b52517dec1',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling PDFium
   # and whatever else without interference from each other.
-  'pdfium_revision': 'c7c276ce1192f043affb2098ac7ce44f7fd7f084',
+  'pdfium_revision': 'd14da8e682e244127db32490365d1c094243e5f3',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling BoringSSL
   # and whatever else without interference from each other.
   #
   # Note this revision should be updated with
   # third_party/boringssl/roll_boringssl.py, not roll-dep.
-  'boringssl_revision': '1e469e45a46ff580899cbef939babe02ad916c85',
+  'boringssl_revision': 'b95124305ab15c7523d3e21437309fa5dd717ee8',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:8.20220609.0.1',
+  'fuchsia_version': 'version:9.20220720.2.1',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling google-toolbox-for-mac
   # and whatever else without interference from each other.
@@ -318,23 +340,23 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling lss
   # and whatever else without interference from each other.
-  'lss_revision': '1d387f43f3702818f7fc04d334539a2f05cafccd',
+  'lss_revision': '0d6435b731ef91d5182eaecff82ae96764222c48',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling NaCl
   # and whatever else without interference from each other.
-  'nacl_revision': '77f46a8ec552f316f64aaa82b7f06da9fceb036b',
+  'nacl_revision': '18d9964d47fc44f49a4c19b7ba91197ddca00c6a',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling freetype
   # and whatever else without interference from each other.
-  'freetype_revision': 'b11074cf6dce78d0bc79ff7996dec70ca3abe4a9',
+  'freetype_revision': '275b116b40c9d183d42242099ea9ff276985855b',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling freetype
   # and whatever else without interference from each other.
-  'freetype_testing_revision': 'fe2eddae6176f75e2101289eeda22a5ff3d808ca',
+  'freetype_testing_revision': '1ca0c99d25ae3b1e4c70513c1bf74643fc3dee09',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling HarfBuzz
   # and whatever else without interference from each other.
-  'harfbuzz_revision': 'c88a6a9ec3c38793ec8b662362282e076e948943',
+  'harfbuzz_revision': 'fa471043fccb94444510e3300ac2573297c82137',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Emoji Segmenter
   # and whatever else without interference from each other.
@@ -342,11 +364,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling OTS
   # and whatever else without interference from each other.
-  'ots_revision': 'ee537ac096667eed6559124164c3e8482646fd77',
+  'ots_revision': '46bea9879127d0ff1c6601b078e2ce98e83fcd33',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling catapult
   # and whatever else without interference from each other.
-  'catapult_revision': '8a8c0b9c3967e17da1d9f64b3d57e4a969b465a7',
+  'catapult_revision': 'b6e934ef32e6591ad60636e3fe167d0e3e9aa5d4',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libFuzzer
   # and whatever else without interference from each other.
@@ -354,11 +376,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling devtools-frontend
   # and whatever else without interference from each other.
-  'devtools_frontend_revision': 'ed8fe6a3c528058bcc8e95b9071be6d6cb1b45f2',
+  'devtools_frontend_revision': 'b1ac4239dc5fffa56170e7367a03f35d2eaa223c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libprotobuf-mutator
   # and whatever else without interference from each other.
-  'libprotobuf-mutator': '8942a9ba43d8bb196230c321d46d6a137957a719',
+  'libprotobuf-mutator': 'a304ec48dcf15d942607032151f7e9ee504b5dcf',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
   # and whatever else without interference from each other.
@@ -390,11 +412,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'dawn_revision': '2d334ed4f01b00211acd0360a5f9dc91d83354cb',
+  'dawn_revision': '1e98a9ba4a64301e0ab932e22ce989688f6cdf6c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'quiche_revision': '26920de2d369289246585fa1b236ef2ef516d317',
+  'quiche_revision': '53c94d968dd6a0cf748caf42462a0b676f95530c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling ios_webkit
   # and whatever else without interference from each other.
@@ -406,7 +428,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling wuffs
   # and whatever else without interference from each other.
-  'wuffs_revision': 'ebbecaa2fb439eff0aeedafadb4c2a984446dee8',
+  'wuffs_revision': 'a8205c2fe7564b12fea81ee028ba670112cc7719',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libgifcodec
   # and whatever else without interference from each other.
@@ -414,11 +436,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libavif
   # and whatever else without interference from each other.
-  'libavif_revision': '1dedea43710c55a1bf1c675126cb6cb546c7b2a7',
+  'libavif_revision': 'dd2d67c5f976038354c0406a253e26dd2abc4632',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling nearby
   # and whatever else without interference from each other.
-  'nearby_revision': 'd8453565db739ebbfbbeade9ec8f8239057b1e13',
+  'nearby_revision': 'd2c401112cc577fe3c5f9a11329bb557048af31a',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling securemessage
   # and whatever else without interference from each other.
@@ -434,15 +456,15 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'libcxxabi_revision': '013bcd820a353ec1d3ff472f2971553fea7232cf',
+  'libcxxabi_revision': 'b954e3e65634a9e2f7b595598a30c455f5f2eb26',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'libunwind_revision': '1644d070bf374182739543c3a12df88dfec0b88a',
+  'libunwind_revision': '955e2ff5fbb15791fea263c1c80e1ec6b3c5ee61',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'clang_format_revision': 'e435ad79c17b1888b34df88d6a30a094936e3836',
+  'clang_format_revision': '8b525d2747f2584fc35d8c7e612e66f377858df7',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
@@ -451,13 +473,17 @@ vars = {
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
   'highway_revision': '424360251cdcfc314cfc528f53c872ecd63af0f0',
+  # Three lines of non-changing comments so that
+  # the commit queue can handle CLs rolling ffmpeg
+  # and whatever else without interference from each other.
+  'ffmpeg_revision': '880df5ede50a8534c8116d0d50e4bc4f3ef08a06',
 
   # If you change this, also update the libc++ revision in
   # //buildtools/deps_revisions.gni.
-  'libcxx_revision': 'b1269813eaf5b8ac78e35e45a0f7cc320bd3e7d6',
+  'libcxx_revision': '88bf4070487fbe9020697a2281743b91e5e29bef',
 
   # GN CIPD package version.
-  'gn_version': 'git_revision:2ecd43a10266bd091c98e6dcde507c64f6a0dad3',
+  'gn_version': 'git_revision:9ef321772ecc161937db69acb346397e0ccc484d',
 }
 
 # Only these hosts are allowed for dependencies in this DEPS file.
@@ -538,7 +564,7 @@ deps = {
     'packages': [
       {
        'package': 'chromium/third_party/android_rust_toolchain/linux-amd64',
-       'version': 'BKAbvHjGv4-os-v8MS3I54bPsY-397xgaJ3yBeIVS20C',
+       'version': 'rMU9JFlwRfB-5VEWgDPRFYme5sXSnsHPSE3uQXf1xBQC',
       },
     ],
     'dep_type': 'cipd',
@@ -608,7 +634,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/updater/chrome_mac_universal',
-        'version': '3UoNd4X57YAONz6mhQPapIUoO1Ds6K5AIHTAZxNmLUUC',
+        'version': 'RBRqNIwSXHjdZf4BVjWk8enaKIHw58aQGFDVNozlbWIC',
       },
     ],
   },
@@ -619,7 +645,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chrome_win_x86',
-       'version': 'vTjr7sgsZvG680Qx0nhiR3H4-EAJDdh5m1BkVbbuER0C',
+       'version': 'tqlS-vFYsn2LVSJMwipq84EKLmwKa1XJb760NnpQL2gC',
      },
     ],
   },
@@ -630,7 +656,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chrome_win_x86_64',
-       'version': 'dQu9E9-wPljBkp6aaMxzCt9WJdBI_u_w2N1VRS2Mdf4C',
+       'version': 'RthX5RzppvSV7uq2P6pm2bnv6-dvoHpUIOsZFk57ZMEC',
      },
     ],
   },
@@ -642,7 +668,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chromium_mac_amd64',
-       'version': '1RmnK4JbmordT5NJbib3mfagpHvrO-oJaTW4HKsYgmAC',
+       'version': '7MvxvS-pmZP1iAXQoCiLI7nv4UkDiyw8PC1ycwpYWbYC',
      },
     ],
   },
@@ -654,7 +680,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chromium_mac_arm64',
-       'version': '-o_WPHLr3JzvHdsTl_E0AgEV2D2--sBNqVT_F74hvOIC',
+       'version': 'd5lN7fzV07O4-Mu_T8TshrGQtlR9F508p9cdhchcLpYC',
      },
     ],
   },
@@ -665,7 +691,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chromium_win_x86',
-       'version': 'zdcQMuiuIXnQQ4eBsYyk0uNTr4N0LaphcQYs0RUByZQC',
+       'version': '8zehx-DVmaf_FplPe23acLAStf3Z7anQ3CY9LXBfvD0C',
      },
     ],
   },
@@ -676,7 +702,7 @@ deps = {
     'packages': [
      {
        'package': 'chromium/third_party/updater/chromium_win_x86_64',
-       'version': '77COFCYoGCHrnvDJZFC8GoYea4B8GeTVWNKpUZBCwe4C',
+       'version': 'KE8JnjZFOyHxUhVdRkm0IMVqlZIaYPnAOI-zxtUD4zUC',
      },
     ],
   },
@@ -747,16 +773,16 @@ deps = {
     Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
 
   'src/docs/website': {
-    'url': Var('chromium_git') + '/website.git' + '@' + 'ffb0b23dc2833849a0b1e57f25caba219e717963',
+    'url': Var('chromium_git') + '/website.git' + '@' + '3965ba67f8d283378e6c0b64d634b91fb830a378',
   },
 
   'src/ios/third_party/earl_grey2/src': {
-    'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '16126e61e3b831b005a91d83cecefd54628530bb',
+    'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '53a2982c85ac6cf802719603d037ad3be7091ebb',
     'condition': 'checkout_ios',
   },
 
   'src/ios/third_party/edo/src': {
-    'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '07ef2f35406e9f04cf86d11da5cc62d58f36cb15',
+    'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '3d3dcee71993376f3abcf3457b046c1df6c13182',
     'condition': 'checkout_ios',
   },
@@ -765,13 +791,8 @@ deps = {
     'condition': 'checkout_ios',
   },
 
-  'src/ios/third_party/gcdwebserver/src': {
-    'url': Var('chromium_git') + '/external/github.com/swisspol/GCDWebServer.git' + '@' + '43555c66627f6ed44817855a0f6d465f559d30e0',
-    'condition': 'checkout_ios',
-  },
-
   'src/ios/third_party/material_components_ios/src': {
-    'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '8225446a8df15e53e96304852f134259e0e5e2df',
+    'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '425d641798c86ab809fcb067bbb265958756af98',
     'condition': 'checkout_ios',
   },
@@ -796,7 +817,7 @@ deps = {
   },
 
   'src/ios/third_party/material_text_accessibility_ios/src': {
-    'url': Var('chromium_git') + '/external/github.com/material-foundation/material-text-accessibility-ios.git' + '@' + '197375261e25ee5d473219d0f353a1f635f5393d',
+    'url': Var('chromium_git') + '/external/github.com/material-foundation/material-text-accessibility-ios.git' + '@' + '8cd910c1c8bbae261ae0d7e873ed96c69a386448',
     'condition': 'checkout_ios',
   },
@@ -841,7 +862,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/linux-amd64',
-        'version': 'gpPYMz2bs2vn_NVI9vEIw6a1w2oQ7ck6CsoRISA8fgoC',
+        'version': 'XlzIsX8AH06QHVAMzpKt5aT3nfupjnBr78ztG18pXdsC',
       },
     ],
     'dep_type': 'cipd',
@@ -852,7 +873,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/mac-amd64',
-        'version': 'swy7IasO6Zn8zMTIWXP3ixCvml7S_2vUby60ZuPlFdEC',
+        'version': 'CPhzNoasDtJ45F8bwTLs7lIQDiy-PhdReFmXrlL5FDoC',
       },
     ],
     'dep_type': 'cipd',
@@ -863,13 +884,24 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/windows-amd64',
-        'version': 'eIvALKHzZkCX0uLI5LLb8fpo2ckXFiu_dh4_8SdQyE8C',
+        'version': 'XmZtpYZGxTqwBMgEXpdyfrdCxx79QfYeVlhpviF2OUcC',
       },
     ],
     'dep_type': 'cipd',
     'condition': 'checkout_rts_model and checkout_win',
   },
 
+  'src/testing/rts/rts-ml-chromium/linux-amd64': {
+    'packages': [
+      {
+        'package': 'experimental/google.com/sshrimp/chromium/rts/model/linux-amd64',
+        'version': '3K1dz8hGV_xBeEcPKmXfrPYWCwXdRf6KVVLrg7AuJ0sC',
+      },
+    ],
+    'dep_type': 'cipd',
+    'condition': 'checkout_rts_experimental_model and checkout_linux',
+  },
+
   'src/tools/luci-go': {
     'packages': [
       {
@@ -920,7 +952,7 @@ deps = {
     'packages': [
       {
           'package': 'chromium/third_party/androidx',
-          'version': 'rMO5RMLXC5cyz-Lk9zE9bV66Gc8ggeO7EPkCxYXNO2oC',
+          'version': 'gAsD4l8EoP_W0IH5vzedZ1tyN3-wAP8-fqkaS_mX6rcC',
       },
     ],
     'condition': 'checkout_android',
@@ -953,7 +985,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_build_tools/aapt2',
-        'version': 'jziPmg_EUjoj-eSkO24eMaf5ylm449Q7BKO1-ga2tbgC',
+        'version': 'hf9C5IyJUUGaBnzqu60xiFJSyfAmjqjc_PiNXNVc9l0C',
       },
     ],
     'condition': 'checkout_android',
@@ -964,7 +996,29 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_build_tools/bundletool',
-        'version': 'AqsPZpWJh-ZyGraHKlbH8XgjRnmyDmolX4HhwPEo9XUC',
+        'version': 'qLkNwA6wjoqznVqaa151GelgGBP4X495n0z-jluACPcC',
+      },
+    ],
+    'condition': 'checkout_android',
+    'dep_type': 'cipd',
+  },
+
+  'src/third_party/android_build_tools/lint': {
+    'packages': [
+      {
+        'package': 'chromium/third_party/android_build_tools/lint',
+        'version': 'INnGGTfg5gGJutJiBtWI6-QwusHDDnKvZzI53Q3UiecC',
+      },
+    ],
+    'condition': 'checkout_android',
+    'dep_type': 'cipd',
+  },
+
+  'src/third_party/android_build_tools/manifest_merger': {
+    'packages': [
+      {
+        'package': 'chromium/third_party/android_build_tools/manifest_merger',
+        'version': '0WkAedh1tJB8lzisWJRT80UjpacKLltuV7NqP-0tx9gC',
       },
     ],
     'condition': 'checkout_android',
@@ -1004,7 +1058,7 @@ deps = {
       },
       {
         'package': 'chromium/third_party/android_sdk/public/cmdline-tools',
-        'version': 'PGPmqJtSIQ84If155ba7iTU846h5WJ-bL5d_OoUWEWYC',
+        'version': 'IPzAG-uU5zVMxohpg9-7-N0tQC1TCSW1VbrBFw7Ld04C',
       },
     ],
     'condition': 'checkout_android_native_support',
@@ -1015,7 +1069,7 @@ deps = {
     Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
 
   'src/third_party/content_analysis_sdk/src':
-    Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'd2a0b6188bcbae674f8ef2c42c7cffc908ac632e',
+    Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '1d7dd0490808a8a972949521cc314e42d085c69f',
 
   'src/third_party/dav1d/libdav1d':
     Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@@ -1047,17 +1101,6 @@ deps = {
   'src/third_party/boringssl/src':
     Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
 
-  'src/third_party/bouncycastle': {
-    'packages': [
-      {
-        'package': 'chromium/third_party/bouncycastle',
-        'version': 'c078e87552ba26e776566fdaf0f22cd8712743d0',
-      },
-    ],
-    'condition': 'checkout_android',
-    'dep_type': 'cipd',
-  },
-
   'src/third_party/breakpad/breakpad':
     Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'c4c43b80ea8854c57a4374ac32579b577172dc23',
@@ -1084,7 +1127,7 @@ deps = {
   },
 
   'src/third_party/cast_core/public/src':
-    Var('chromium_git') + '/cast_core/public' + '@' + '6c053df4fe5aea168ca651e2d95dc5dc40ebe059',
+    Var('chromium_git') + '/cast_core/public' + '@' + '8ba5ff47563d0ca8233e8fa009377ed14a560cf4',
 
   'src/third_party/catapult':
     Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@@ -1113,7 +1156,7 @@ deps = {
   # Tools used when building Chrome for Chrome OS. This affects both the Simple
   # Chrome workflow, as well as the chromeos-chrome ebuild.
   'src/third_party/chromite': {
-    'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '41d2e02e8d1438445eee7463ae4536fe6280e0fc',
+    'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '60350fcfeb0bca00eba2794f53661a1b996a79a5',
     'condition': 'checkout_chromeos',
   },
@@ -1131,12 +1174,17 @@ deps = {
   # For Linux and Chromium OS.
   'src/third_party/cros_system_api': {
-    'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '315a946dfdf2b0d850e35ec2dbca7ad45525c6ff',
+    'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'f3b1373caf7bd717be4f0d21ab8c738c6bfcf418',
     'condition': 'checkout_linux',
   },
 
+  'src/third_party/crubit/src': {
+    'url': Var('chromium_git') + '/external/github.com/google/crubit.git' + '@' + Var('crubit_revision'),
+    'condition': 'checkout_crubit',
+  },
+
   'src/third_party/depot_tools':
-    Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'a4b3602457c422f8ed054ff3673baaecedc2972a',
+    Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '0ba2fd429dd6db431fcbee6995c1278d2a3657a0',
 
   'src/third_party/devtools-frontend/src':
     Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
@@ -1145,7 +1193,7 @@ deps = {
     Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
 
   'src/third_party/eigen3/src':
-    Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b02c384ef4e8eba7b8bdef16f9dc6f8f4d6a6b2b',
+    Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '0e187141679fdb91da33249d18cb79a011c0e2ea',
 
   'src/third_party/emoji-metadata/src': {
     'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
@@ -1167,13 +1215,13 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/farmhash.git' + '@' + '816a4ae622e964763ca0862d9dbd19324a1eaf45',
 
   'src/third_party/ffmpeg':
-    Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + 'abfc2628f25d283c27ffc960a8ff820ae8110467',
+    Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + Var('ffmpeg_revision'),
 
   'src/third_party/flac':
     Var('chromium_git') + '/chromium/deps/flac.git' + '@' + 'af862024c8c8fa0ae07ced05e89013d881b00596',
 
   'src/third_party/flatbuffers/src':
-    Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + 'b8aaccee8248059b2af032cca0eb1d2ddbdb6cdc',
+    Var('chromium_git') + '/external/github.com/google/flatbuffers.git' + '@' + '0fe13cb28ce5a3fb81f654b21cb37c9821194962',
 
   # Used for embedded builds. CrOS & Linux use the system version.
   'src/third_party/fontconfig/src': {
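The ffmpeg change above is a refactor, not just a roll: the pinned commit moves out of the deps entry and into the `ffmpeg_revision` var added earlier in this CL, the same single-point-of-update indirection the other frequently rolled dependencies already use. Schematically (a condensed sketch of the two hunks, not a literal DEPS excerpt):

vars = {
  # The autoroller now edits only this line when rolling ffmpeg.
  'ffmpeg_revision': '880df5ede50a8534c8116d0d50e4bc4f3ef08a06',
}

deps = {
  # Var('ffmpeg_revision') is substituted when gclient evaluates DEPS,
  # so this entry stays byte-identical across future rolls.
  'src/third_party/ffmpeg':
    Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + Var('ffmpeg_revision'),
}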
@@ -1188,7 +1236,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',
 
   'src/third_party/grpc/src': {
-    'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + 'd1338d8751231bdc0d87e732d25420e87d24cffd',
+    'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + '89f7534e43cf73f56c492a9cf7eb85ca6bfbd87a',
   },
 
   'src/third_party/freetype/src':
@@ -1210,7 +1258,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/khaledhosny/ots.git' + '@' + Var('ots_revision'),
 
   'src/third_party/libgav1/src':
-    Var('chromium_git') + '/codecs/libgav1.git' + '@' + '38ca0c62d9079820bbb872d81738770c0a28ae6d',
+    Var('chromium_git') + '/codecs/libgav1.git' + '@' + 'cd53f7c0d6a1c005e38874d143c8876d375bae70',
 
   'src/third_party/google_toolbox_for_mac/src': {
     'url': Var('chromium_git') + '/external/github.com/google/google-toolbox-for-mac.git' + '@' + Var('google_toolbox_for_mac_revision'),
@@ -1248,7 +1296,7 @@ deps = {
   },
 
   'src/third_party/arcore-android-sdk/src': {
-    'url': Var('chromium_git') + '/external/github.com/google-ar/arcore-android-sdk.git' + '@' + 'eaa85941f2d724c60671bf94f46de7178baba7e6',
+    'url': Var('chromium_git') + '/external/github.com/google-ar/arcore-android-sdk.git' + '@' + 'd197af6b38f98d2344bc7f76326c4aa0c4f2fb90',
     'condition': 'checkout_android',
   },
@@ -1256,7 +1304,7 @@ deps = {
     'packages': [
       {
           'package': 'chromium/third_party/arcore-android-sdk-client',
-          'version': 'fUSZ4jxIhIx34TxRIcrmOu76-khcka_Gpn0_t9lKCWQC',
+          'version': 'NYf1qvBS9hNVK-6exAl-MaVbQsRKCDYl5Br936GtcU0C',
       },
     ],
@@ -1290,7 +1338,7 @@ deps = {
     Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
 
   'src/third_party/icu':
-    Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '12de966fcbe1d1a48dba310aee63807856ffeee8',
+    Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '6e558942cc8b83e525bdabaf987e06af8a377314',
 
   'src/third_party/icu4j': {
     'packages': [
@@ -1358,10 +1406,10 @@ deps = {
     Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
 
   'src/third_party/libaddressinput/src':
-    Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'a56be6f77ea76a9e77561ca306acede244631610',
+    Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',
 
   'src/third_party/libaom/source/libaom':
-    Var('aomedia_git') + '/aom.git' + '@' + '32d6783c4c9f6e78f43bd4ceffaa0eb2143aed57',
+    Var('aomedia_git') + '/aom.git' + '@' + '8dcdafc6d4a2f9f8ea8104f26eca5d123eefcb7f',
 
   'src/third_party/libavif/src':
     Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1394,7 +1442,7 @@ deps = {
     Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '22f1a22c99e9dde8cd3c72ead333f425c5a7aa77',
 
   'src/third_party/liblouis/src': {
-    'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + 'c05f3bfb0990434bd12bf6697d16ed943f2203c2',
+    'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '9700847afb92cb35969bdfcbbfbbb74b9c7b3376',
     'condition': 'checkout_linux',
   },
@@ -1419,13 +1467,16 @@ deps = {
   },
 
   'src/third_party/libvpx/source/libvpx':
-    Var('chromium_git') + '/webm/libvpx.git' + '@' + 'ca89bed50dbc5fe2abef50c5f36924bb1da6d1f6',
+    Var('chromium_git') + '/webm/libvpx.git' + '@' + '711bef67400f096416cb1ba7f6560e533871490f',
 
   'src/third_party/libwebm/source':
     Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
 
+  'src/third_party/libwebp/src':
+    Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',
+
   'src/third_party/libyuv':
-    Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '30f9b280487be412da346aecce7834275020976e',
+    Var('chromium_git') + '/libyuv/libyuv.git' + '@' + 'd248929c059ff7629a85333699717d7a677d8d96',
 
   'src/third_party/lighttpd': {
     'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1516,7 +1567,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',
 
   'src/third_party/openscreen/src':
-    Var('chromium_git') + '/openscreen' + '@' + '1ea464c1b10821e39dd487b6f9e4813c5c8546b0',
+    Var('chromium_git') + '/openscreen' + '@' + '6be6b78224a276e908b8272542d125e133c40f3f',
 
   'src/third_party/openxr/src': {
     'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1533,7 +1584,7 @@ deps = {
   },
 
   'src/third_party/perfetto':
-    Var('android_git') + '/platform/external/perfetto.git' + '@' + 'a6b4d9563ec37e531ebcc21912c3078ba4c2b367',
+    Var('android_git') + '/platform/external/perfetto.git' + '@' + '361efbf9aab595e4dfa79ec48f242d9e722393c9',
 
   'src/third_party/perl': {
     'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1569,8 +1620,8 @@ deps = {
   'src/third_party/qemu-linux-arm64': {
     'packages': [
       {
-        'package': 'fuchsia/qemu/linux-arm64',
-        'version': 'b1b61a39e3ab0935cd030f27e01740578b04b967'
+        'package': 'fuchsia/third_party/qemu/linux-arm64',
+        'version': 'BpnoBb2d44_SOm9toN6Lju5a2RLGAc1TPUO6xyijoP8C'
       },
     ],
     'condition': 'host_os == "linux" and checkout_fuchsia and checkout_fuchsia_for_arm64_host',
@@ -1584,7 +1635,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/r8',
-        'version': 'iMLEt10uXASDfG2AlATR1fO8xYhBoF24nQvDDXLY6Q8C',
+        'version': 'auReXfxxD74XGdPdi-rYsKrp4sRwYwgNjh_W0PT7vNcC',
       },
     ],
     'condition': 'checkout_android',
@@ -1608,7 +1659,7 @@ deps = {
   },
 
   'src/third_party/ruy/src':
-    Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '7ef39c5745a61f43071e699c6a96da41701ae59f',
+    Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '72155b3185246e9143f4c6a3a7f283d2ebba8524',
 
   'src/third_party/skia':
     Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@@ -1620,7 +1671,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
 
   'src/third_party/sqlite/src':
-    Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'cb47d7089f714e4514f126dfa8ac630cab78ea32',
+    Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'e6b63421941617bf5ccac6b4a62d7a7b4a2c3fef',
 
   'src/third_party/sqlite4java': {
     'packages': [
@@ -1651,25 +1702,20 @@ deps = {
     Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
 
   'src/third_party/tflite/src':
-    Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '5ddefa9f8d1455c8ca694ba1a511ba82e3a88960',
+    Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ac31ffa987c14665062c00f98ec025a3fdc185ab',
 
   'src/third_party/turbine': {
     'packages': [
       {
         'package': 'chromium/third_party/turbine',
-        'version': 'A5bkaoLBc1JYa6Xv31jYUCK5TnXYCbxRrA4FBPXXcWgC',
+        'version': 'RxaW8ZVJZ7rF3dbAGbL1OV6f9ZELjVeiiQbpkDsuxA4C',
       },
     ],
     'condition': 'checkout_android',
     'dep_type': 'cipd',
   },
 
-  'src/third_party/ub-uiautomator/lib': {
-    'url': Var('chromium_git') + '/chromium/third_party/ub-uiautomator.git' + '@' + '00270549ce3161ae72ceb24712618ea28b4f9434',
-    'condition': 'checkout_android',
-  },
-
-  'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@12989fc8d7d273b5b95ef988d37469738eb9c39b',
+  'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@c42337d9ef75170244486b580bad7dfe78447bfd',
 
   'src/third_party/vulkan_memory_allocator':
     Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@@ -1702,13 +1748,13 @@ deps = {
     Var('chromium_git') + '/external/github.com/SeleniumHQ/selenium/py.git' + '@' + 'd0045ec570c1a77612db35d1e92f05e1d27b4d53',
 
   'src/third_party/webgl/src':
-    Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'bb289ce3cb15bbabd42fdcb01439367846d9069d',
+    Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + '44e4c8770158c505b03ee7feafa4859d083b0912',
 
   'src/third_party/webgpu-cts/src':
-    Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '84183b561b7f44b2eae6a5f268d5085accb0ad7a',
+    Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'c4eb1df3f306c0ee3e43ba2446eb3616e42d6855',
 
   'src/third_party/webrtc':
-    Var('webrtc_git') + '/src.git' + '@' + '06aea31d10f860ae4236e3422252557762d39188',
+    Var('webrtc_git') + '/src.git' + '@' + 'dc5cf31cad576376abd3aa6306169453cfd85ba5',
 
   'src/third_party/libgifcodec':
     Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
@@ -1729,7 +1775,7 @@ deps = {
   },
 
   'src/third_party/xnnpack/src':
-    Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'da533e0114f2bf730f17853ae10556d84a3d1e89',
+    Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a33b227047def29b79853ef688b6dda6c6fc5386',
 
   'src/tools/page_cycler/acid3':
     Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@@ -1738,7 +1784,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/linux-amd64',
-        'version': 'l0eo0uR9oMGZFKDucYLGk3vdNGAunP4tGrDhIfzljdcC',
+        'version': 'BquSeorcTU84O2_A8IoWetGrcfLWxLfZCo9sve1Wt2IC',
       },
     ],
     'dep_type': 'cipd',
@@ -1748,7 +1794,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/windows-amd64',
-        'version': '7YFnDYPdgUOTWkaZe19LzWhmwWBuVMM9Q02sTDQtlkgC',
+        'version': 'AOoQr1u4-cOIEYJDAgVxGWoTiPaRcjrSsjjAaB-u_ggC',
      },
     ],
     'dep_type': 'cipd',
@@ -1759,7 +1805,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/mac-amd64',
-        'version': '4shFavk40_bOJI9zUzvH2PaMW7tHKuxeEN0zik75F7sC',
+        'version': '-t3YY_sZ-jtMAYZ2PlhjudFnEUgk4m-HjlIwSip4tOAC',
       },
     ],
     'dep_type': 'cipd',
@@ -1770,7 +1816,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/mac-arm64',
-        'version': 'Ynx3axT8ZDN7Nl5pNwI5w8PCCt7jax5FGf8jcyjUzt4C',
+        'version': 'x_xKUnqrgizoTO8mxX4RkyhpQ-nUp_x_go9YH-tc--QC',
       },
     ],
     'dep_type': 'cipd',
@@ -1781,7 +1827,7 @@ deps = {
     Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
 
   'src-internal': {
-    'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@4ff96f70fd1b507f17579577080637625e6e2ef8',
+    'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@c29563eac12bc062b66805a4766673729ce7d4ef',
     'condition': 'checkout_src_internal',
   },
@@ -1800,7 +1846,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/eche_app/app',
-        'version': '3AnrwGQwN1GPv9gbT4wn-9ScZNzXc5x6e68tt5yFI30C',
+        'version': 'PEjYa5GVISxpuqCZfq9pZ3QeSWhNtWSdQ6gmJ8bizQ0C',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1811,7 +1857,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/help_app/app',
-        'version': 'Vmw_AXRtISh_bHtp8evOfKkTHoF7PksGw7-UBEamlI0C',
+        'version': 'MqUROEBmHZCBRsEY3abQ7JOvoDr5wZ_MTK3vAN-901wC',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1822,7 +1868,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/media_app/app',
-        'version': 'XXOw6zqfIbq0PGDfk6FNetf4Bsym6nYVy42v7R5n2AoC',
+        'version': 'tV1aN61vvzGiDSJgQxN_namEG8pvO6RTuO-qbQMC51IC',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@ -1833,7 +1879,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/projector_app/app', 'package': 'chromeos_internal/apps/projector_app/app',
'version': 'C5cTyNZYNM9JtGlk_csAXrSxTsH8Mk2vTIU2dEfjRskC', 'version': 'Eeqz2JXdGXA3-P7iu9xSzSc3iyUuAruoN1W3-FplxR4C',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -3160,6 +3206,17 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_bouncycastle_bcprov_jdk15on',
'version': 'version:2@1.68.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup': { 'src/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup': {
'packages': [ 'packages': [
{ {
@ -3252,7 +3309,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.6.21.cr1', 'version': 'version:2@1.7.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3263,7 +3320,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.6.21.cr1', 'version': 'version:2@1.7.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3567,6 +3624,15 @@ deps = {
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/tools/bazel': {
'packages': [{
'package': 'infra/3pp/tools/bazel_bootstrap/${{platform}}',
'version': 'version:2@5.2.0.1',
}],
'dep_type': 'cipd',
'condition': 'checkout_bazel',
},
} }
@ -3600,6 +3666,7 @@ include_rules = [
'-absl', '-absl',
'-third_party/abseil-cpp', '-third_party/abseil-cpp',
'+third_party/abseil-cpp/absl/base/attributes.h', '+third_party/abseil-cpp/absl/base/attributes.h',
"+third_party/abseil-cpp/absl/functional/function_ref.h",
"+third_party/abseil-cpp/absl/numeric/int128.h", "+third_party/abseil-cpp/absl/numeric/int128.h",
'+third_party/abseil-cpp/absl/types/optional.h', '+third_party/abseil-cpp/absl/types/optional.h',
'+third_party/abseil-cpp/absl/types/variant.h', '+third_party/abseil-cpp/absl/types/variant.h',
@ -3619,20 +3686,6 @@ skip_child_includes = [
hooks = [ hooks = [
# Download and initialize "vpython" VirtualEnv environment packages for
# Python2. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and
# don't run slower than they might otherwise need to.
{
'name': 'vpython_common',
'pattern': '.',
# TODO(https://crbug.com/1205263): Run this on mac/arm too once it works.
'condition': 'not (host_os == "mac" and host_cpu == "arm64") and enable_vpython_common_crbug_1329052',
'action': [ 'vpython',
'-vpython-spec', 'src/.vpython',
'-vpython-tool', 'install',
],
},
# Download and initialize "vpython" VirtualEnv environment packages for # Download and initialize "vpython" VirtualEnv environment packages for
# Python3. We do this before running any other hooks so that any other # Python3. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and # hooks that might use vpython don't trip over unexpected issues and
@ -3889,6 +3942,14 @@ hooks = [
'-s', 'src/third_party/skia', '-s', 'src/third_party/skia',
'--header', 'src/skia/ext/skia_commit_hash.h'], '--header', 'src/skia/ext/skia_commit_hash.h'],
}, },
{
# Update dawn_version.h.
'name': 'lastchange_dawn',
'pattern': '.',
'action': ['python3', 'src/build/util/lastchange.py',
'-s', 'src/third_party/dawn',
'--revision', 'src/gpu/webgpu/DAWN_VERSION'],
},
# Pull dsymutil binaries using checked-in hashes. # Pull dsymutil binaries using checked-in hashes.
{ {
'name': 'dsymutil_mac_arm64', 'name': 'dsymutil_mac_arm64',
@ -4447,6 +4508,16 @@ hooks = [
], ],
}, },
{
'name': 'Download Fuchsia SDK from GCS',
'pattern': '.',
'condition': 'checkout_fuchsia',
'action': [
'python3',
'src/build/fuchsia/override_sdk.py',
],
},
{ {
'name': 'Download Fuchsia system images', 'name': 'Download Fuchsia system images',
'pattern': '.', 'pattern': '.',
@ -4683,6 +4754,17 @@ hooks = [
], ],
'condition': 'generate_location_tags', 'condition': 'generate_location_tags',
}, },
{
# Clean up build dirs for crbug.com/1337238.
# After a libc++ roll and revert, .ninja_deps would get into a state
# that breaks Ninja on Windows.
# TODO(crbug.com/1337238): Remove in a month or so.
'name': 'del_ninja_deps_cache',
'pattern': '.',
'condition': 'host_os == "win"',
'action': ['python3', 'src/build/del_ninja_deps_cache.py'],
},
] ]
# Add any corresponding DEPS files from this list to chromium.exclusions in # Add any corresponding DEPS files from this list to chromium.exclusions in

View File

@ -159,6 +159,13 @@ buildflag_header("ios_cronet_buildflags") {
flags = [ "CRONET_BUILD=$is_cronet_build" ] flags = [ "CRONET_BUILD=$is_cronet_build" ]
} }
enable_message_pump_epoll = is_linux || is_chromeos || is_android
buildflag_header("message_pump_buildflags") {
header = "message_pump_buildflags.h"
header_dir = "base/message_loop"
flags = [ "ENABLE_MESSAGE_PUMP_EPOLL=$enable_message_pump_epoll" ]
}
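For context on how a generated buildflag header like the one above is consumed, here is a minimal sketch; the factory function and the libevent fallback are illustrative assumptions, not part of this change (only the GN arg, the buildflags header, and MessagePumpEpoll are introduced in this diff):

#include <memory>

#include "base/message_loop/message_pump_buildflags.h"
#include "build/buildflag.h"

#if BUILDFLAG(ENABLE_MESSAGE_PUMP_EPOLL)
#include "base/message_loop/message_pump_epoll.h"
#else
#include "base/message_loop/message_pump_libevent.h"
#endif

// Hypothetical factory: compile in the epoll pump only where the GN arg
// enabled it, and fall back to the libevent pump elsewhere.
std::unique_ptr<base::MessagePump> CreatePumpForIO() {
#if BUILDFLAG(ENABLE_MESSAGE_PUMP_EPOLL)
  return std::make_unique<base::MessagePumpEpoll>();
#else
  return std::make_unique<base::MessagePumpLibevent>();
#endif
}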
# Base and everything it depends on should be a static library rather than # Base and everything it depends on should be a static library rather than
# a source set. Base is more of a "library" in the classic sense in that many # a source set. Base is more of a "library" in the classic sense in that many
# small parts of it are used in many different contexts. This combined with a # small parts of it are used in many different contexts. This combined with a
@ -180,6 +187,10 @@ mixed_component("base") {
"allocator/allocator_check.h", "allocator/allocator_check.h",
"allocator/allocator_extension.cc", "allocator/allocator_extension.cc",
"allocator/allocator_extension.h", "allocator/allocator_extension.h",
"allocator/dispatcher/dispatcher.cc",
"allocator/dispatcher/dispatcher.h",
"allocator/dispatcher/reentry_guard.cc",
"allocator/dispatcher/reentry_guard.h",
"as_const.h", "as_const.h",
"at_exit.cc", "at_exit.cc",
"at_exit.h", "at_exit.h",
@ -217,6 +228,8 @@ mixed_component("base") {
"cancelable_callback.h", "cancelable_callback.h",
"check.cc", "check.cc",
"check.h", "check.h",
"check_is_test.cc",
"check_is_test.h",
"check_op.cc", "check_op.cc",
"check_op.h", "check_op.h",
"command_line.cc", "command_line.cc",
@ -400,6 +413,7 @@ mixed_component("base") {
"memory/raw_ptr_asan_service.cc", "memory/raw_ptr_asan_service.cc",
"memory/raw_ptr_asan_service.h", "memory/raw_ptr_asan_service.h",
"memory/raw_ptr_exclusion.h", "memory/raw_ptr_exclusion.h",
"memory/raw_ref.h",
"memory/raw_scoped_refptr_mismatch_checker.h", "memory/raw_scoped_refptr_mismatch_checker.h",
"memory/read_only_shared_memory_region.cc", "memory/read_only_shared_memory_region.cc",
"memory/read_only_shared_memory_region.h", "memory/read_only_shared_memory_region.h",
@ -498,7 +512,6 @@ mixed_component("base") {
"native_library.cc", "native_library.cc",
"native_library.h", "native_library.h",
"no_destructor.h", "no_destructor.h",
"notreached.cc",
"notreached.h", "notreached.h",
"observer_list.h", "observer_list.h",
"observer_list_internal.cc", "observer_list_internal.cc",
@ -552,7 +565,6 @@ mixed_component("base") {
"profiler/metadata_recorder.h", "profiler/metadata_recorder.h",
"profiler/module_cache.cc", "profiler/module_cache.cc",
"profiler/module_cache.h", "profiler/module_cache.h",
"profiler/native_unwinder.h",
"profiler/profile_builder.h", "profiler/profile_builder.h",
"profiler/register_context.h", "profiler/register_context.h",
"profiler/sample_metadata.cc", "profiler/sample_metadata.cc",
@ -914,6 +926,7 @@ mixed_component("base") {
"trace_event/trace_id_helper.h", "trace_event/trace_id_helper.h",
"traits_bag.h", "traits_bag.h",
"tuple.h", "tuple.h",
"types/always_false.h",
"types/expected.h", "types/expected.h",
"types/expected_internal.h", "types/expected_internal.h",
"types/id_type.h", "types/id_type.h",
@ -1266,9 +1279,9 @@ mixed_component("base") {
"process/process_iterator_mac.cc", "process/process_iterator_mac.cc",
"process/process_mac.cc", "process/process_mac.cc",
"process/process_metrics_mac.cc", "process/process_metrics_mac.cc",
"profiler/frame_pointer_unwinder.cc",
"profiler/frame_pointer_unwinder.h",
"profiler/module_cache_mac.cc", "profiler/module_cache_mac.cc",
"profiler/native_unwinder_apple.cc",
"profiler/native_unwinder_apple.h",
"profiler/stack_sampler_mac.cc", "profiler/stack_sampler_mac.cc",
"profiler/suspendable_thread_delegate_mac.cc", "profiler/suspendable_thread_delegate_mac.cc",
"profiler/suspendable_thread_delegate_mac.h", "profiler/suspendable_thread_delegate_mac.h",
@ -1314,8 +1327,8 @@ mixed_component("base") {
if (ios_stack_profiler_enabled) { if (ios_stack_profiler_enabled) {
sources += [ sources += [
"profiler/native_unwinder_apple.cc", "profiler/frame_pointer_unwinder.cc",
"profiler/native_unwinder_apple.h", "profiler/frame_pointer_unwinder.h",
"profiler/suspendable_thread_delegate_mac.cc", "profiler/suspendable_thread_delegate_mac.cc",
"profiler/suspendable_thread_delegate_mac.h", "profiler/suspendable_thread_delegate_mac.h",
] ]
@ -1415,15 +1428,13 @@ mixed_component("base") {
configs += [ configs += [
"//build/config:precompiled_headers", "//build/config:precompiled_headers",
"//build/config/compiler:prevent_unsafe_narrowing",
# TODO(crbug.com/1292951): Enable.
# "//build/config/compiler:prevent_unsafe_narrowing",
"//build/config/compiler:wexit_time_destructors", "//build/config/compiler:wexit_time_destructors",
"//build/config/compiler:wglobal_constructors", "//build/config/compiler:wglobal_constructors",
] ]
deps = [ deps = [
":message_pump_buildflags",
"//base/allocator:buildflags", "//base/allocator:buildflags",
"//base/third_party/double_conversion", "//base/third_party/double_conversion",
"//base/third_party/dynamic_annotations", "//base/third_party/dynamic_annotations",
@ -1676,8 +1687,14 @@ mixed_component("base") {
"logging_chromeos.cc", "logging_chromeos.cc",
"system/sys_info_chromeos.cc", "system/sys_info_chromeos.cc",
] ]
if (is_chromeos_ash) {
sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ] sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ]
if (current_cpu == "x64") {
sources += [
"profiler/frame_pointer_unwinder.cc",
"profiler/frame_pointer_unwinder.h",
]
} }
} }
@ -1792,6 +1809,7 @@ mixed_component("base") {
deps += [ deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys",
"//third_party/fuchsia-sdk/sdk/pkg/async-default", "//third_party/fuchsia-sdk/sdk/pkg/async-default",
@ -2178,7 +2196,7 @@ mixed_component("base") {
} }
if (dep_libevent) { if (dep_libevent) {
deps += [ "//base/third_party/libevent" ] deps += [ "//third_party/libevent" ]
} }
if (use_libevent) { if (use_libevent) {
@ -2188,6 +2206,13 @@ mixed_component("base") {
] ]
} }
if (enable_message_pump_epoll) {
sources += [
"message_loop/message_pump_epoll.cc",
"message_loop/message_pump_epoll.h",
]
}
# Android and MacOS have their own custom shared memory handle # Android and MacOS have their own custom shared memory handle
# implementations. e.g. due to supporting both POSIX and native handles. # implementations. e.g. due to supporting both POSIX and native handles.
if (is_posix && !is_android && !is_apple) { if (is_posix && !is_android && !is_apple) {
@ -2229,8 +2254,7 @@ mixed_component("base") {
] ]
} }
if ((is_posix && !is_apple && !is_android && !is_chromeos_ash) || if ((is_posix && !is_apple && !is_android && !is_chromeos) || is_fuchsia) {
is_fuchsia) {
sources += [ "power_monitor/power_monitor_device_source_stub.cc" ] sources += [ "power_monitor/power_monitor_device_source_stub.cc" ]
} }
@ -2287,8 +2311,6 @@ mixed_component("base") {
"trace_event/process_memory_dump.cc", "trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h", "trace_event/process_memory_dump.h",
"trace_event/task_execution_macros.h", "trace_event/task_execution_macros.h",
"trace_event/thread_instruction_count.cc",
"trace_event/thread_instruction_count.h",
"trace_event/trace_arguments.cc", "trace_event/trace_arguments.cc",
"trace_event/trace_arguments.h", "trace_event/trace_arguments.h",
"trace_event/trace_buffer.cc", "trace_event/trace_buffer.cc",

View File

@ -3,6 +3,7 @@ include_rules = [
"+third_party/apple_apsl", "+third_party/apple_apsl",
"+third_party/boringssl/src/include", "+third_party/boringssl/src/include",
"+third_party/ced", "+third_party/ced",
"+third_party/libevent",
"+third_party/libunwindstack/src/libunwindstack/include", "+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss", "+third_party/lss",
"+third_party/modp_b64", "+third_party/modp_b64",

View File

@ -2,7 +2,7 @@ lizeb@chromium.org
primiano@chromium.org primiano@chromium.org
wfh@chromium.org wfh@chromium.org
per-file allocator.gni=bartekn@chromium.org per-file allocator.gni=file://base/allocator/partition_allocator/OWNERS
per-file allocator_shim_default_dispatch_to_partition_alloc*=bartekn@chromium.org per-file allocator_shim_default_dispatch_to_partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file partition_alloc*=bartekn@chromium.org per-file partition_alloc*=file://base/allocator/partition_allocator/OWNERS
per-file BUILD.gn=bartekn@chromium.org per-file BUILD.gn=file://base/allocator/partition_allocator/OWNERS

View File

@ -81,7 +81,8 @@ if (is_win && use_allocator_shim) {
"The allocator shim doesn't work for the component build on Windows.") "The allocator shim doesn't work for the component build on Windows.")
} }
_is_brp_supported = (is_win || is_android) && use_allocator == "partition" _is_brp_supported = (is_win || is_android || is_linux || is_mac ||
is_chromeos) && use_allocator == "partition"
declare_args() { declare_args() {
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation # Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
@ -112,7 +113,10 @@ declare_args() {
# Finch. # Finch.
use_fake_binary_experiment = false use_fake_binary_experiment = false
use_asan_backup_ref_ptr = false # The supported platforms are supposed to match `_is_brp_supported`, but we
# enable the feature on Linux early because it's most widely used for security
# research
use_asan_backup_ref_ptr = is_asan && (is_win || is_android || is_linux)
} }
# Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used. # Prevent using BackupRefPtr when PartitionAlloc-Everywhere isn't used.

View File

@ -44,8 +44,7 @@
#include "base/mac/mac_util.h" #include "base/mac/mac_util.h"
#endif #endif
namespace base { namespace base::allocator {
namespace allocator {
bool g_replaced_default_zone = false; bool g_replaced_default_zone = false;
@ -237,9 +236,9 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
bool CanGetContextForCFAllocator() { bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS) #if BUILDFLAG(IS_IOS)
return !base::ios::IsRunningOnOrLater(16, 0, 0); return !base::ios::IsRunningOnOrLater(17, 0, 0);
#else #else
return !base::mac::IsOSLaterThan12_DontCallThis(); return !base::mac::IsOSLaterThan13_DontCallThis();
#endif #endif
} }
@ -258,7 +257,7 @@ void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
void* info) { void* info) {
void* result = g_old_cfallocator_system_default(alloc_size, hint, info); void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
if (!result) if (!result)
TerminateBecauseOutOfMemory(alloc_size); TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
return result; return result;
} }
@ -267,7 +266,7 @@ void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
void* info) { void* info) {
void* result = g_old_cfallocator_malloc(alloc_size, hint, info); void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
if (!result) if (!result)
TerminateBecauseOutOfMemory(alloc_size); TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
return result; return result;
} }
@ -276,7 +275,7 @@ void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
void* info) { void* info) {
void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info); void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
if (!result) if (!result)
TerminateBecauseOutOfMemory(alloc_size); TerminateBecauseOutOfMemory(static_cast<size_t>(alloc_size));
return result; return result;
} }
@ -609,5 +608,4 @@ void ReplaceZoneFunctions(ChromeMallocZone* zone,
} }
} }
} // namespace allocator } // namespace base::allocator
} // namespace base
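The bulk of this hunk is a mechanical migration to C++17 nested namespace definitions; a minimal sketch of the syntax change (variable names here are illustrative):

// Pre-C++17 style: nested blocks, one closing brace per namespace.
namespace base {
namespace allocator {
bool g_example_flag = false;
}  // namespace allocator
}  // namespace base

// C++17 style: one declaration and one closing brace for the same entity.
namespace base::allocator {
bool g_another_example_flag = false;
}  // namespace base::allocator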

View File

@ -174,6 +174,7 @@ BASE_EXPORT void InitializeAllocatorShim();
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer(); BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>; using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>; using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition = using UseDedicatedAlignedPartition =
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>; base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
@ -185,12 +186,13 @@ using AlternateBucketDistribution =
// thread-cache on the main (malloc) partition will be disabled. // thread-cache on the main (malloc) partition will be disabled.
BASE_EXPORT void ConfigurePartitions( BASE_EXPORT void ConfigurePartitions(
EnableBrp enable_brp, EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
SplitMainPartition split_main_partition, SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition, UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution); AlternateBucketDistribution use_alternate_bucket_distribution);
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
BASE_EXPORT void EnablePCScan(base::internal::PCScan::InitConfig); BASE_EXPORT void EnablePCScan(partition_alloc::internal::PCScan::InitConfig);
#endif #endif
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
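The new EnableBrpZapping argument follows the same base::StrongAlias pattern as the existing EnableBrp. A simplified sketch of why the alias is used (this is a stand-in, not base's actual StrongAlias implementation, and Configure is hypothetical):

#include <utility>

// Each parameter gets a distinct type, so boolean arguments cannot be
// swapped silently at a call site.
template <typename Tag, typename T>
class StrongAlias {
 public:
  constexpr explicit StrongAlias(T value) : value_(std::move(value)) {}
  constexpr explicit operator bool() const { return static_cast<bool>(value_); }

 private:
  T value_;
};

using EnableBrp = StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = StrongAlias<class EnableBrpZappingTag, bool>;

void Configure(EnableBrp brp, EnableBrpZapping zapping);  // hypothetical

// Configure(EnableBrp(true), EnableBrpZapping(false)) reads unambiguously;
// Configure(true, false) would not compile.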

View File

@ -27,6 +27,7 @@
#include "base/feature_list.h" #include "base/feature_list.h"
#include "base/memory/nonscannable_memory.h" #include "base/memory/nonscannable_memory.h"
#include "base/numerics/checked_math.h" #include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "build/chromecast_buildflags.h" #include "build/chromecast_buildflags.h"
@ -146,26 +147,27 @@ T* LeakySingleton<T, Constructor>::GetSlowPath() {
class MainPartitionConstructor { class MainPartitionConstructor {
public: public:
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) { static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) {
constexpr base::PartitionOptions::ThreadCache thread_cache = constexpr partition_alloc::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Additional partitions may be created in ConfigurePartitions(). Since // Additional partitions may be created in ConfigurePartitions(). Since
// only one partition can have thread cache enabled, postpone the // only one partition can have thread cache enabled, postpone the
// decision to turn the thread cache on until after that call. // decision to turn the thread cache on until after that call.
// TODO(bartekn): Enable it here by default, once the "split-only" mode // TODO(bartekn): Enable it here by default, once the "split-only" mode
// is no longer needed. // is no longer needed.
base::PartitionOptions::ThreadCache::kDisabled; partition_alloc::PartitionOptions::ThreadCache::kDisabled;
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Other tests, such as the ThreadCache tests create a thread cache, // Other tests, such as the ThreadCache tests create a thread cache,
// and only one is supported at a time. // and only one is supported at a time.
base::PartitionOptions::ThreadCache::kDisabled; partition_alloc::PartitionOptions::ThreadCache::kDisabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({ auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed, partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
thread_cache, thread_cache,
base::PartitionOptions::Quarantine::kAllowed, partition_alloc::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed, partition_alloc::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled, partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo, partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
}); });
return new_root; return new_root;
@ -275,9 +277,9 @@ namespace internal {
namespace { namespace {
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
int g_alloc_flags = 0; unsigned int g_alloc_flags = 0;
#else #else
constexpr int g_alloc_flags = 0; constexpr unsigned int g_alloc_flags = 0;
#endif #endif
} // namespace } // namespace
@ -300,7 +302,7 @@ void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) { void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
ScopedDisallowAllocations guard{}; ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(
0 | g_alloc_flags, MaybeAdjustSize(size), g_alloc_flags, MaybeAdjustSize(size),
partition_alloc::PartitionPageSize()); partition_alloc::PartitionPageSize());
} }
@ -513,17 +515,19 @@ void PartitionBatchFree(const AllocatorDispatch*,
} }
// static // static
ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() { partition_alloc::ThreadSafePartitionRoot* PartitionAllocMalloc::Allocator() {
return ::Allocator(); return ::Allocator();
} }
// static // static
ThreadSafePartitionRoot* PartitionAllocMalloc::OriginalAllocator() { partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::OriginalAllocator() {
return ::OriginalAllocator(); return ::OriginalAllocator();
} }
// static // static
ThreadSafePartitionRoot* PartitionAllocMalloc::AlignedAllocator() { partition_alloc::ThreadSafePartitionRoot*
PartitionAllocMalloc::AlignedAllocator() {
return ::AlignedAllocator(); return ::AlignedAllocator();
} }
@ -563,6 +567,7 @@ alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t
void ConfigurePartitions( void ConfigurePartitions(
EnableBrp enable_brp, EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping,
SplitMainPartition split_main_partition, SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition, UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AlternateBucketDistribution use_alternate_bucket_distribution) { AlternateBucketDistribution use_alternate_bucket_distribution) {
@ -596,18 +601,22 @@ void ConfigurePartitions(
PA_DCHECK(!current_root->flags.with_thread_cache); PA_DCHECK(!current_root->flags.with_thread_cache);
return; return;
} }
auto* new_root = new (g_allocator_buffer_for_new_main_partition)
auto* new_root = partition_alloc::ThreadSafePartitionRoot({
new (g_allocator_buffer_for_new_main_partition) ThreadSafePartitionRoot({
!use_dedicated_aligned_partition !use_dedicated_aligned_partition
? base::PartitionOptions::AlignedAlloc::kAllowed ? partition_alloc::PartitionOptions::AlignedAlloc::kAllowed
: base::PartitionOptions::AlignedAlloc::kDisallowed, : partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
base::PartitionOptions::ThreadCache::kDisabled, partition_alloc::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed, partition_alloc::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed, partition_alloc::PartitionOptions::Cookie::kAllowed,
enable_brp ? base::PartitionOptions::BackupRefPtr::kEnabled enable_brp
: base::PartitionOptions::BackupRefPtr::kDisabled, ? partition_alloc::PartitionOptions::BackupRefPtr::kEnabled
base::PartitionOptions::UseConfigurablePool::kNo, : partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
enable_brp_zapping
? partition_alloc::PartitionOptions::BackupRefPtrZapping::kEnabled
: partition_alloc::PartitionOptions::BackupRefPtrZapping::
kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
}); });
partition_alloc::ThreadSafePartitionRoot* new_aligned_root; partition_alloc::ThreadSafePartitionRoot* new_aligned_root;
@ -615,13 +624,14 @@ void ConfigurePartitions(
// TODO(bartekn): Use the original root instead of creating a new one. It'd // TODO(bartekn): Use the original root instead of creating a new one. It'd
// result in one less partition, but come at a cost of commingling types. // result in one less partition, but come at a cost of commingling types.
new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition) new_aligned_root = new (g_allocator_buffer_for_aligned_alloc_partition)
ThreadSafePartitionRoot({ partition_alloc::ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed, partition_alloc::PartitionOptions::AlignedAlloc::kAllowed,
base::PartitionOptions::ThreadCache::kDisabled, partition_alloc::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::Quarantine::kAllowed, partition_alloc::PartitionOptions::Quarantine::kAllowed,
base::PartitionOptions::Cookie::kAllowed, partition_alloc::PartitionOptions::Cookie::kAllowed,
base::PartitionOptions::BackupRefPtr::kDisabled, partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
base::PartitionOptions::UseConfigurablePool::kNo, partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
}); });
} else { } else {
// The new main root can also support AlignedAlloc. // The new main root can also support AlignedAlloc.
@ -643,8 +653,9 @@ void ConfigurePartitions(
PA_CHECK(current_aligned_root == g_original_root); PA_CHECK(current_aligned_root == g_original_root);
// Purge memory, now that the traffic to the original partition is cut off. // Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans | current_root->PurgeMemory(
PurgeFlags::kDiscardUnusedSystemPages); partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
partition_alloc::PurgeFlags::kDiscardUnusedSystemPages);
if (!use_alternate_bucket_distribution) { if (!use_alternate_bucket_distribution) {
g_root.Get()->SwitchToDenserBucketDistribution(); g_root.Get()->SwitchToDenserBucketDistribution();
@ -653,16 +664,18 @@ void ConfigurePartitions(
} }
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
void EnablePCScan(base::internal::PCScan::InitConfig config) { void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
partition_alloc::internal::base::PlatformThread::SetThreadNameHook( partition_alloc::internal::base::PlatformThread::SetThreadNameHook(
&::base::PlatformThread::SetName); &::base::PlatformThread::SetName);
internal::PCScan::Initialize(config); partition_alloc::internal::PCScan::Initialize(config);
internal::PCScan::RegisterScannableRoot(Allocator()); partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
if (OriginalAllocator() != nullptr) if (OriginalAllocator() != nullptr)
internal::PCScan::RegisterScannableRoot(OriginalAllocator()); partition_alloc::internal::PCScan::RegisterScannableRoot(
OriginalAllocator());
if (Allocator() != AlignedAllocator()) if (Allocator() != AlignedAllocator())
internal::PCScan::RegisterScannableRoot(AlignedAllocator()); partition_alloc::internal::PCScan::RegisterScannableRoot(
AlignedAllocator());
internal::NonScannableAllocator::Instance().NotifyPCScanEnabled(); internal::NonScannableAllocator::Instance().NotifyPCScanEnabled();
internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled(); internal::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
@ -724,11 +737,11 @@ SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW { SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
base::SimplePartitionStatsDumper allocator_dumper; partition_alloc::SimplePartitionStatsDumper allocator_dumper;
Allocator()->DumpStats("malloc", true, &allocator_dumper); Allocator()->DumpStats("malloc", true, &allocator_dumper);
// TODO(bartekn): Dump OriginalAllocator() into "malloc" as well. // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
base::SimplePartitionStatsDumper aligned_allocator_dumper; partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
if (AlignedAllocator() != Allocator()) { if (AlignedAllocator() != Allocator()) {
AlignedAllocator()->DumpStats("posix_memalign", true, AlignedAllocator()->DumpStats("posix_memalign", true,
&aligned_allocator_dumper); &aligned_allocator_dumper);
@ -737,13 +750,13 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
// Dump stats for nonscannable and nonquarantinable allocators. // Dump stats for nonscannable and nonquarantinable allocators.
auto& nonscannable_allocator = auto& nonscannable_allocator =
base::internal::NonScannableAllocator::Instance(); base::internal::NonScannableAllocator::Instance();
base::SimplePartitionStatsDumper nonscannable_allocator_dumper; partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
if (auto* nonscannable_root = nonscannable_allocator.root()) if (auto* nonscannable_root = nonscannable_allocator.root())
nonscannable_root->DumpStats("malloc", true, nonscannable_root->DumpStats("malloc", true,
&nonscannable_allocator_dumper); &nonscannable_allocator_dumper);
auto& nonquarantinable_allocator = auto& nonquarantinable_allocator =
base::internal::NonQuarantinableAllocator::Instance(); base::internal::NonQuarantinableAllocator::Instance();
base::SimplePartitionStatsDumper nonquarantinable_allocator_dumper; partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) if (auto* nonquarantinable_root = nonquarantinable_allocator.root())
nonquarantinable_root->DumpStats("malloc", true, nonquarantinable_root->DumpStats("malloc", true,
&nonquarantinable_allocator_dumper); &nonquarantinable_allocator_dumper);
@ -752,20 +765,23 @@ SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
info.arena = 0; // Memory *not* allocated with mmap(). info.arena = 0; // Memory *not* allocated with mmap().
// Memory allocated with mmap(), aka virtual size. // Memory allocated with mmap(), aka virtual size.
info.hblks = allocator_dumper.stats().total_mmapped_bytes + info.hblks = base::checked_cast<decltype(info.hblks)>(
allocator_dumper.stats().total_mmapped_bytes +
aligned_allocator_dumper.stats().total_mmapped_bytes + aligned_allocator_dumper.stats().total_mmapped_bytes +
nonscannable_allocator_dumper.stats().total_mmapped_bytes + nonscannable_allocator_dumper.stats().total_mmapped_bytes +
nonquarantinable_allocator_dumper.stats().total_mmapped_bytes; nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
// Resident bytes. // Resident bytes.
info.hblkhd = allocator_dumper.stats().total_resident_bytes + info.hblkhd = base::checked_cast<decltype(info.hblkhd)>(
allocator_dumper.stats().total_resident_bytes +
aligned_allocator_dumper.stats().total_resident_bytes + aligned_allocator_dumper.stats().total_resident_bytes +
nonscannable_allocator_dumper.stats().total_resident_bytes + nonscannable_allocator_dumper.stats().total_resident_bytes +
nonquarantinable_allocator_dumper.stats().total_resident_bytes; nonquarantinable_allocator_dumper.stats().total_resident_bytes);
// Allocated bytes. // Allocated bytes.
info.uordblks = allocator_dumper.stats().total_active_bytes + info.uordblks = base::checked_cast<decltype(info.uordblks)>(
allocator_dumper.stats().total_active_bytes +
aligned_allocator_dumper.stats().total_active_bytes + aligned_allocator_dumper.stats().total_active_bytes +
nonscannable_allocator_dumper.stats().total_active_bytes + nonscannable_allocator_dumper.stats().total_active_bytes +
nonquarantinable_allocator_dumper.stats().total_active_bytes; nonquarantinable_allocator_dumper.stats().total_active_bytes);
return info; return info;
} }
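The mallinfo fields are plain ints while PartitionAlloc's counters are size_t, which is what the new base::checked_cast calls address. A small sketch of checked_cast's contract (the helper function below is illustrative):

#include <cstddef>

#include "base/numerics/safe_conversions.h"

// checked_cast converts when the value fits in the destination type and
// CHECK-crashes instead of silently truncating.
int SumToInt(size_t a, size_t b) {
  const size_t total = a + b;
  return base::checked_cast<int>(total);  // crashes if total > INT_MAX
}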

View File

@ -16,11 +16,11 @@ void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value);
class BASE_EXPORT PartitionAllocMalloc { class BASE_EXPORT PartitionAllocMalloc {
public: public:
static ThreadSafePartitionRoot* Allocator(); static partition_alloc::ThreadSafePartitionRoot* Allocator();
// May return |nullptr|, will never return the same pointer as |Allocator()|. // May return |nullptr|, will never return the same pointer as |Allocator()|.
static ThreadSafePartitionRoot* OriginalAllocator(); static partition_alloc::ThreadSafePartitionRoot* OriginalAllocator();
// May return the same pointer as |Allocator()|. // May return the same pointer as |Allocator()|.
static ThreadSafePartitionRoot* AlignedAllocator(); static partition_alloc::ThreadSafePartitionRoot* AlignedAllocator();
}; };
BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*, BASE_EXPORT void* PartitionMalloc(const base::allocator::AllocatorDispatch*,

View File

@ -17,43 +17,6 @@
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "build/build_config.h" #include "build/build_config.h"
// std::align_val_t isn't available until C++17, but we want to override aligned
// new/delete anyway to prevent a possible situation where a library gets loaded
// in that uses the aligned operators. We want to avoid a situation where
// separate heaps are used.
// TODO(thomasanderson): Remove this once building with C++17 or later.
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
#define ALIGN_VAL_T std::align_val_t
#define ALIGN_LINKAGE
#define ALIGN_NEW operator new
#define ALIGN_NEW_NOTHROW operator new
#define ALIGN_DEL operator delete
#define ALIGN_DEL_SIZED operator delete
#define ALIGN_DEL_NOTHROW operator delete
#define ALIGN_NEW_ARR operator new[]
#define ALIGN_NEW_ARR_NOTHROW operator new[]
#define ALIGN_DEL_ARR operator delete[]
#define ALIGN_DEL_ARR_SIZED operator delete[]
#define ALIGN_DEL_ARR_NOTHROW operator delete[]
#else
#define ALIGN_VAL_T size_t
#define ALIGN_LINKAGE extern "C"
#if BUILDFLAG(IS_WIN)
#error "Mangling is different on these platforms."
#else
#define ALIGN_NEW _ZnwmSt11align_val_t
#define ALIGN_NEW_NOTHROW _ZnwmSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL _ZdlPvSt11align_val_t
#define ALIGN_DEL_SIZED _ZdlPvmSt11align_val_t
#define ALIGN_DEL_NOTHROW _ZdlPvSt11align_val_tRKSt9nothrow_t
#define ALIGN_NEW_ARR _ZnamSt11align_val_t
#define ALIGN_NEW_ARR_NOTHROW _ZnamSt11align_val_tRKSt9nothrow_t
#define ALIGN_DEL_ARR _ZdaPvSt11align_val_t
#define ALIGN_DEL_ARR_SIZED _ZdaPvmSt11align_val_t
#define ALIGN_DEL_ARR_NOTHROW _ZdaPvSt11align_val_tRKSt9nothrow_t
#endif
#endif
#if !BUILDFLAG(IS_APPLE) #if !BUILDFLAG(IS_APPLE)
#define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT #define SHIM_CPP_SYMBOLS_EXPORT SHIM_ALWAYS_EXPORT
#else #else
@ -110,57 +73,58 @@ SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p, size_t) __THROW {
ShimCppDelete(p); ShimCppDelete(p);
} }
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW(std::size_t size,
                                                      ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL(void* p,
                                                     ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR(
    std::size_t size,
    ALIGN_VAL_T alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void* ALIGN_NEW_ARR_NOTHROW(
    std::size_t size,
    ALIGN_VAL_T alignment,
    const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void ALIGN_DEL_ARR(void* p,
                                                         ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_SIZED(void* p, std::size_t size, ALIGN_VAL_T) __THROW {
  ShimCppDelete(p);
}
ALIGN_LINKAGE SHIM_CPP_SYMBOLS_EXPORT void
ALIGN_DEL_ARR_NOTHROW(void* p, ALIGN_VAL_T, const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}

SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
                                           std::align_val_t alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new(std::size_t size,
                                           std::align_val_t alignment,
                                           const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
                                             std::align_val_t) __THROW {
  ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
                                             std::size_t size,
                                             std::align_val_t) __THROW {
  ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete(void* p,
                                             std::align_val_t,
                                             const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
                                             std::align_val_t alignment) {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void* operator new[](std::size_t size,
                                             std::align_val_t alignment,
                                             const std::nothrow_t&) __THROW {
  return ShimCppAlignedNew(size, static_cast<size_t>(alignment));
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
                                               std::align_val_t) __THROW {
  ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
                                               std::size_t size,
                                               std::align_val_t) __THROW {
  ShimCppDelete(p);
}
SHIM_CPP_SYMBOLS_EXPORT void operator delete[](void* p,
                                               std::align_val_t,
                                               const std::nothrow_t&) __THROW {
  ShimCppDelete(p);
}
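With the shim now building as C++17 everywhere, the aligned forms are spelled as ordinary operator overloads rather than mangled-name macros. A standalone, POSIX-only sketch of the same technique (not the shim itself; posix_memalign stands in for ShimCppAlignedNew):

#include <cstdlib>
#include <new>

// Route aligned allocations through a single allocator so a library built
// with -faligned-new cannot pair this operator new with a foreign delete.
void* operator new(std::size_t size, std::align_val_t alignment) {
  void* p = nullptr;
  // Alignments reaching this overload are powers of two larger than the
  // default new alignment, satisfying posix_memalign's requirements.
  if (posix_memalign(&p, static_cast<std::size_t>(alignment), size) != 0)
    throw std::bad_alloc();
  return p;
}

void operator delete(void* p, std::align_val_t) noexcept {
  free(p);
}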

View File

@ -122,7 +122,10 @@ SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
malloc(kInitialSize)); // Our malloc() doesn't return nullptr. malloc(kInitialSize)); // Our malloc() doesn't return nullptr.
int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args); int actual_size = vsnprintf(*strp, kInitialSize, fmt, va_args);
*strp = static_cast<char*>(realloc(*strp, actual_size + 1)); if (actual_size < 0)
return actual_size;
*strp =
static_cast<char*>(realloc(*strp, static_cast<size_t>(actual_size + 1)));
// Now we know the size. This is not very efficient, but we cannot really do // Now we know the size. This is not very efficient, but we cannot really do
// better without accessing internal libc functions, or reimplementing // better without accessing internal libc functions, or reimplementing
@ -131,7 +134,7 @@ SHIM_ALWAYS_EXPORT int __wrap_vasprintf(char** strp,
// This is very lightly used in Chromium in practice, see crbug.com/116558 for // This is very lightly used in Chromium in practice, see crbug.com/116558 for
// details. // details.
if (actual_size >= kInitialSize) if (actual_size >= kInitialSize)
return vsnprintf(*strp, actual_size + 1, fmt, va_args); return vsnprintf(*strp, static_cast<size_t>(actual_size + 1), fmt, va_args);
return actual_size; return actual_size;
} }
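The added early return guards against vsnprintf failing; for reference, a standalone sketch of the contract the wrapper relies on (the helper is illustrative, not part of the change):

#include <cstdarg>
#include <cstdio>

// vsnprintf returns the number of characters that would have been written
// (excluding the trailing NUL), or a negative value on error, so a first
// pass with a tiny buffer yields the exact allocation size needed.
int RequiredSize(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  char tiny[1];
  int n = vsnprintf(tiny, sizeof(tiny), fmt, args);
  va_end(args);
  return n;  // callers must check for n < 0 before using it as a size
}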

View File

@ -0,0 +1,239 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {
using allocator::AllocatorDispatch;
void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
ReentryGuard guard;
void* address = self->next->alloc_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_unchecked_function(self->next, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_zero_initialized_function(self->next, n, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, n * size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
ReentryGuard guard;
void* address =
self->next->alloc_aligned_function(self->next, alignment, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->realloc_function(self->next, address, size, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
// Note: RecordFree should be called before free_function (here and in other
// places): the recorded allocation sample must be removed before
// free_function runs, because once it has executed the address becomes
// available for reuse and may be allocated by another thread. Anything else
// would be racy.
PoissonAllocationSampler::RecordFree(address);
self->next->free_function(self->next, address, context);
}
size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
ReentryGuard guard;
unsigned num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
if (LIKELY(guard)) {
for (unsigned i = 0; i < num_allocated; ++i) {
PoissonAllocationSampler::RecordAlloc(
results[i], size, PoissonAllocationSampler::kMalloc, nullptr);
}
}
return num_allocated;
}
void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
for (unsigned i = 0; i < num_to_be_freed; ++i)
PoissonAllocationSampler::RecordFree(to_be_freed[i]);
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->free_definite_size_function(self->next, address, size, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
void* address =
self->next->aligned_malloc_function(self->next, size, alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
PoissonAllocationSampler::RecordFree(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (LIKELY(guard)) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kMalloc, nullptr);
}
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
PoissonAllocationSampler::RecordFree(address);
self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
} // namespace
} // namespace base::allocator::dispatcher::allocator_shim_details
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher::partition_allocator_details {
namespace {
void PartitionAllocHook(void* address, size_t size, const char* type) {
PoissonAllocationSampler::RecordAlloc(
address, size, PoissonAllocationSampler::kPartitionAlloc, type);
}
void PartitionFreeHook(void* address) {
PoissonAllocationSampler::RecordFree(address);
}
} // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
namespace base::allocator::dispatcher {
void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator::InsertAllocatorDispatch(
&allocator_shim_details::g_allocator_dispatch);
#else
// If the allocator shim isn't available, then we don't install any hooks.
// There's no point in printing an error message, since this can regularly
// happen for tests.
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(
&partition_allocator_details::PartitionAllocHook,
&partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
}
void RemoveStandardAllocatorHooksForTesting() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator::RemoveAllocatorDispatchForTesting(
&allocator_shim_details::g_allocator_dispatch); // IN-TEST
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC) && !BUILDFLAG(IS_NACL)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}
} // namespace base::allocator::dispatcher
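A rough sketch of how a process wires this up at startup; the main() scaffolding is an assumption, while the two calls are the ones defined in this file and in reentry_guard.h:

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/reentry_guard.h"

int main(int argc, char** argv) {
  // Grab a low pthread TLS slot before anything can call malloc.
  base::allocator::dispatcher::ReentryGuard::Init();
  // Chain the Poisson sampling hooks into the allocator shim and, where
  // available, PartitionAlloc's observer hooks.
  base::allocator::dispatcher::InstallStandardAllocatorHooks();
  // ... run the application ...
  return 0;
}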

View File

@ -0,0 +1,17 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/base_export.h"
namespace base::allocator::dispatcher {
void BASE_EXPORT InstallStandardAllocatorHooks();
void BASE_EXPORT RemoveStandardAllocatorHooksForTesting();
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

View File

@ -0,0 +1,20 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
namespace base::allocator::dispatcher {
pthread_key_t ReentryGuard::entered_key_ = 0;
} // namespace base::allocator::dispatcher
#endif

View File

@ -0,0 +1,68 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#define BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
// The macOS implementation of libmalloc sometimes calls malloc recursively,
// delegating allocations between zones. That causes our hooks to be called
// twice. The scoped guard allows us to detect that.
//
// Beyond that, the implementations of thread_local on macOS and Android
// appear to allocate memory lazily, on the first access to a thread_local
// variable, so we use pthread TLS instead of C++ thread_local there.
struct ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
}
~ReentryGuard() {
if (LIKELY(allowed_))
pthread_setspecific(entered_key_, nullptr);
}
explicit operator bool() const noexcept { return allowed_; }
// This function must be called very early during process start-up in order
// to acquire a low TLS slot number, because the glibc TLS implementation
// requires a malloc call to allocate storage for higher slot numbers
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). Cf. heap_profiling::InitTLSSlot.
static void Init() {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
}
private:
static pthread_key_t entered_key_;
const bool allowed_;
};
#else
// Use [[maybe_unused]] because this lightweight stand-in for the more
// heavyweight ReentryGuard above would otherwise trigger "unused" warnings.
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; }
static void Init() {}
};
#endif
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_REENTRY_GUARD_H_
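To make the recursion problem concrete, a hedged sketch of the pattern this guard enables; ActuallyAllocate and RecordSample are hypothetical stand-ins for the real hook plumbing in dispatcher.cc:

#include <cstddef>

#include "base/allocator/dispatcher/reentry_guard.h"

void* ActuallyAllocate(std::size_t size);            // hypothetical
void RecordSample(void* address, std::size_t size);  // hypothetical

void* HookedAlloc(std::size_t size) {
  base::allocator::dispatcher::ReentryGuard guard;
  // On macOS this call may re-enter HookedAlloc while libmalloc delegates
  // between zones; in that nested invocation `guard` evaluates to false.
  void* address = ActuallyAllocate(size);
  if (guard)
    RecordSample(address, size);  // runs once per logical allocation
  return address;
}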

View File

@ -11,11 +11,19 @@
namespace base { namespace base {
namespace features { namespace features {
// When set, instead of crashing when encountering a dangling raw_ptr, the const BASE_EXPORT Feature kPartitionAllocDanglingPtr{
// signatures of the two stacktraces are logged. This is meant to be used only "PartitionAllocDanglingPtr", FEATURE_DISABLED_BY_DEFAULT};
// by Chromium developers. See /docs/dangling_ptr.md constexpr FeatureParam<DanglingPtrMode>::Option kDanglingPtrModeOption[] = {
const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord{ {DanglingPtrMode::kCrash, "crash"},
"PartitionAllocDanglingPtrRecord", FEATURE_DISABLED_BY_DEFAULT}; {DanglingPtrMode::kLogSignature, "log_signature"},
};
const base::FeatureParam<DanglingPtrMode> kDanglingPtrModeParam{
&kPartitionAllocDanglingPtr,
"mode",
DanglingPtrMode::kCrash,
&kDanglingPtrModeOption,
};
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
// If enabled, PCScan is turned on by default for all partitions that don't // If enabled, PCScan is turned on by default for all partitions that don't
// disable it explicitly. // disable it explicitly.
@ -55,7 +63,8 @@ const BASE_EXPORT Feature kPartitionAllocLargeEmptySlotSpanRing{
const Feature kPartitionAllocBackupRefPtr { const Feature kPartitionAllocBackupRefPtr {
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -79,6 +88,7 @@ const base::FeatureParam<BackupRefPtrEnabledProcesses>
constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = { constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"}, {BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"}, {BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way, {BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"}, "disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way, {BackupRefPtrMode::kDisabledButSplitPartitions3Way,

View File

@ -17,7 +17,28 @@ struct Feature;
namespace features { namespace features {
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtrRecord; // See /docs/dangling_ptr.md
//
// Usage:
// --enable-features=PartitionAllocDanglingPtr:mode/crash
// --enable-features=PartitionAllocDanglingPtr:mode/log_signature
extern const BASE_EXPORT Feature kPartitionAllocDanglingPtr;
enum class DanglingPtrMode {
// Crash immediately after detecting a dangling raw_ptr.
kCrash, // (default)
// Log the signature of every occurrence without crashing. This is used by
// bots.
// Format "[DanglingSignature]\t<1>\t<2>"
// 1. The function who freed the memory while it was still referenced.
// 2. The function who released the raw_ptr reference.
kLogSignature,
// Note: This will be extended with a single shot DumpWithoutCrashing.
};
extern const BASE_EXPORT base::FeatureParam<DanglingPtrMode>
kDanglingPtrModeParam;
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
extern const BASE_EXPORT Feature kPartitionAllocPCScan; extern const BASE_EXPORT Feature kPartitionAllocPCScan;
#endif // defined(PA_ALLOW_PCSCAN) #endif // defined(PA_ALLOW_PCSCAN)
@ -50,6 +71,9 @@ enum class BackupRefPtrMode {
// This entails splitting the main partition. // This entails splitting the main partition.
kEnabled, kEnabled,
// Same as kEnabled but without zapping quarantined objects.
kEnabledWithoutZapping,
// BRP is disabled, but the main partition is split out, as if BRP was enabled // BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode. // in the "previous slot" mode.
kDisabledButSplitPartitions2Way, kDisabledButSplitPartitions2Way,
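An illustrative consumer of the new param; the handler below and the include path are assumptions, but FeatureParam<DanglingPtrMode>::Get() is how the mode/crash and mode/log_signature values surface in code:

#include "base/allocator/partition_alloc_features.h"

void OnDanglingRawPtrDetected() {
  switch (base::features::kDanglingPtrModeParam.Get()) {
    case base::features::DanglingPtrMode::kCrash:
      // e.g. crash immediately with both stack traces attached.
      break;
    case base::features::DanglingPtrMode::kLogSignature:
      // e.g. emit "[DanglingSignature]\t<free>\t<release>" for bots.
      break;
  }
}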

View File

@@ -50,33 +50,33 @@ namespace {
 #if BUILDFLAG(ENABLE_BASE_TRACING)
 constexpr const char* ScannerIdToTracingString(
-    internal::StatsCollector::ScannerId id) {
+    partition_alloc::internal::StatsCollector::ScannerId id) {
   switch (id) {
-    case internal::StatsCollector::ScannerId::kClear:
+    case partition_alloc::internal::StatsCollector::ScannerId::kClear:
       return "PCScan.Scanner.Clear";
-    case internal::StatsCollector::ScannerId::kScan:
+    case partition_alloc::internal::StatsCollector::ScannerId::kScan:
       return "PCScan.Scanner.Scan";
-    case internal::StatsCollector::ScannerId::kSweep:
+    case partition_alloc::internal::StatsCollector::ScannerId::kSweep:
       return "PCScan.Scanner.Sweep";
-    case internal::StatsCollector::ScannerId::kOverall:
+    case partition_alloc::internal::StatsCollector::ScannerId::kOverall:
       return "PCScan.Scanner";
-    case internal::StatsCollector::ScannerId::kNumIds:
+    case partition_alloc::internal::StatsCollector::ScannerId::kNumIds:
       __builtin_unreachable();
   }
 }

 constexpr const char* MutatorIdToTracingString(
-    internal::StatsCollector::MutatorId id) {
+    partition_alloc::internal::StatsCollector::MutatorId id) {
   switch (id) {
-    case internal::StatsCollector::MutatorId::kClear:
+    case partition_alloc::internal::StatsCollector::MutatorId::kClear:
       return "PCScan.Mutator.Clear";
-    case internal::StatsCollector::MutatorId::kScanStack:
+    case partition_alloc::internal::StatsCollector::MutatorId::kScanStack:
       return "PCScan.Mutator.ScanStack";
-    case internal::StatsCollector::MutatorId::kScan:
+    case partition_alloc::internal::StatsCollector::MutatorId::kScan:
       return "PCScan.Mutator.Scan";
-    case internal::StatsCollector::MutatorId::kOverall:
+    case partition_alloc::internal::StatsCollector::MutatorId::kOverall:
       return "PCScan.Mutator";
-    case internal::StatsCollector::MutatorId::kNumIds:
+    case partition_alloc::internal::StatsCollector::MutatorId::kNumIds:
       __builtin_unreachable();
   }
 }
@@ -85,8 +85,9 @@ constexpr const char* MutatorIdToTracingString(
 // Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
 class StatsReporterImpl final : public partition_alloc::StatsReporter {
  public:
-  void ReportTraceEvent(internal::StatsCollector::ScannerId id,
-                        [[maybe_unused]] uint32_t tid,
+  void ReportTraceEvent(
+      partition_alloc::internal::StatsCollector::ScannerId id,
+      [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
       int64_t start_time_ticks_internal_value,
       int64_t end_time_ticks_internal_value) override {
 #if BUILDFLAG(ENABLE_BASE_TRACING)
@@ -104,8 +105,9 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)
   }

-  void ReportTraceEvent(internal::StatsCollector::MutatorId id,
-                        [[maybe_unused]] uint32_t tid,
+  void ReportTraceEvent(
+      partition_alloc::internal::StatsCollector::MutatorId id,
+      [[maybe_unused]] partition_alloc::internal::base::PlatformThreadId tid,
       int64_t start_time_ticks_internal_value,
       int64_t end_time_ticks_internal_value) override {
 #if BUILDFLAG(ENABLE_BASE_TRACING)
@@ -156,7 +158,7 @@ void RegisterPCScanStatsReporter() {
   DCHECK(!registered);

-  internal::PCScan::RegisterStatsReporter(&s_reporter);
+  partition_alloc::internal::PCScan::RegisterStatsReporter(&s_reporter);
   registered = true;
 }
 #endif  // defined(PA_ALLOW_PCSCAN)
@@ -289,6 +291,13 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
         brp_group_name = "EnabledPrevSlot";
 #else
         brp_group_name = "EnabledBeforeAlloc";
+#endif
+        break;
+      case features::BackupRefPtrMode::kEnabledWithoutZapping:
+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+        brp_group_name = "EnabledPrevSlotWithoutZapping";
+#else
+        brp_group_name = "EnabledBeforeAllocWithoutZapping";
 #endif
         break;
       case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
@@ -413,7 +422,6 @@ absl::optional<debug::StackTrace> TakeStackTrace(uintptr_t id) {
 // This function is meant to be used only by Chromium developers, to list all
 // the dangling raw_ptr occurrences in a table.
 std::string ExtractDanglingPtrSignature(std::string stacktrace) {
-  LOG(ERROR) << stacktrace;
   std::vector<StringPiece> lines = SplitStringPiece(
       stacktrace, "\r\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
@@ -453,7 +461,7 @@ std::string ExtractDanglingPtrSignature(std::string stacktrace) {
   return std::string(caller.substr(function_start + 1));
 }

-void DanglingRawPtrReleased(uintptr_t id) {
+void DanglingRawPtrReleasedLogSignature(uintptr_t id) {
   // This is called from raw_ptr<>'s release operation. Making allocations is
   // allowed. In particular, symbolizing and printing the StackTraces may
   // allocate memory.
@@ -461,7 +469,6 @@ void DanglingRawPtrReleased(uintptr_t id) {
   debug::StackTrace stack_trace_release;
   absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);

-  if (FeatureList::IsEnabled(features::kPartitionAllocDanglingPtrRecord)) {
   if (stack_trace_free) {
     LOG(ERROR) << StringPrintf(
         "[DanglingSignature]\t%s\t%s",
@@ -472,8 +479,14 @@ void DanglingRawPtrReleased(uintptr_t id) {
         "[DanglingSignature]\t%s\tmissing-stacktrace",
         ExtractDanglingPtrSignature(stack_trace_release.ToString()).c_str());
   }
-    return;
-  }
+}
+
+void DanglingRawPtrReleasedCrash(uintptr_t id) {
+  // This is called from raw_ptr<>'s release operation. Making allocations is
+  // allowed. In particular, symbolizing and printing the StackTraces may
+  // allocate memory.
+  debug::StackTrace stack_trace_release;
+  absl::optional<debug::StackTrace> stack_trace_free = TakeStackTrace(id);

   if (stack_trace_free) {
     LOG(ERROR) << StringPrintf(
@@ -506,8 +519,24 @@ void InstallDanglingRawPtrChecks() {
   // restarting the test executable.
   ClearDanglingRawPtrBuffer();

+  if (!FeatureList::IsEnabled(features::kPartitionAllocDanglingPtr)) {
+    partition_alloc::SetDanglingRawPtrDetectedFn([](uintptr_t) {});
+    partition_alloc::SetDanglingRawPtrReleasedFn([](uintptr_t) {});
+    return;
+  }
+
+  switch (features::kDanglingPtrModeParam.Get()) {
+    case features::DanglingPtrMode::kCrash:
       partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
-  partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleased);
+      partition_alloc::SetDanglingRawPtrReleasedFn(DanglingRawPtrReleasedCrash);
+      break;
+    case features::DanglingPtrMode::kLogSignature:
+      partition_alloc::SetDanglingRawPtrDetectedFn(DanglingRawPtrDetected);
+      partition_alloc::SetDanglingRawPtrReleasedFn(
+          DanglingRawPtrReleasedLogSignature);
+      break;
+  }
 }

 // TODO(arthursonzogni): There might exist long lived dangling raw_ptr. If there

View File

@@ -277,8 +277,6 @@ target(partition_alloc_target_type, "partition_alloc") {
     ":debugging_buildflags",
     ":logging_buildflags",
     ":partition_alloc_buildflags",
-    "//base:synchronization_buildflags",
-    "//base:tracing_buildflags",
     "//build:branding_buildflags",
     "//build/config/compiler:compiler_buildflags",
   ]
@@ -324,73 +322,6 @@ target(partition_alloc_target_type, "partition_alloc") {
   }
 }

-source_set("test_support") {
-  sources = [
-    "extended_api.cc",
-    "extended_api.h",
-    "partition_alloc_base/threading/platform_thread_for_testing.h",
-  ]
-  if (is_posix) {
-    sources += [
-      "partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
-    ]
-  }
-  if (is_fuchsia) {
-    sources += [
-      "partition_alloc_base/threading/platform_thread_fuchsia_for_testing.cc",
-      "partition_alloc_base/threading/platform_thread_posix_for_testing.cc",
-    ]
-  }
-  if (is_win) {
-    sources +=
-        [ "partition_alloc_base/threading/platform_thread_win_for_testing.cc" ]
-  }
-  if (is_mac || is_ios) {
-    sources +=
-        [ "partition_alloc_base/threading/platform_thread_mac_for_testing.mm" ]
-  }
-  if (is_linux || is_chromeos) {
-    sources += [
-      "partition_alloc_base/threading/platform_thread_linux_for_testing.cc",
-    ]
-  }
-  if (is_android) {
-    sources += [
-      "partition_alloc_base/threading/platform_thread_android_for_testing.cc",
-    ]
-  }
-  public_deps = [
-    ":chromecast_buildflags",
-    ":chromeos_buildflags",
-    ":debugging_buildflags",
-    ":logging_buildflags",
-    ":partition_alloc_buildflags",
-    "//base:synchronization_buildflags",
-    "//base:tracing_buildflags",
-    "//build:branding_buildflags",
-    "//build/config/compiler:compiler_buildflags",
-  ]
-  public_configs = []
-  if (is_fuchsia) {
-    public_deps += [
-      "//third_party/fuchsia-sdk/sdk/pkg/fit",
-      "//third_party/fuchsia-sdk/sdk/pkg/sync",
-      "//third_party/fuchsia-sdk/sdk/pkg/zx",
-    ]
-
-    # Needed for users of spinning_mutex.h, which for performance reasons,
-    # contains inlined calls to `libsync` inside the header file.
-    # It appends an entry to the "libs" section of the dependent target.
-    public_configs += [ ":fuchsia_sync_lib" ]
-  }
-  deps = [ "//base:base" ]
-  if (!is_debug) {
-    configs -= [ "//build/config/compiler:default_optimization" ]
-    configs += [ "//build/config/compiler:optimize_speed" ]
-  }
-}

 buildflag_header("partition_alloc_buildflags") {
   header = "partition_alloc_buildflags.h"

View File

@@ -1,3 +1,4 @@
 bartekn@chromium.org
 haraken@chromium.org
 lizeb@chromium.org
+tasak@google.com

View File

@@ -21,6 +21,8 @@ paths of allocation and deallocation require very few (reasonably predictable)
 branches. The number of operations in the fast paths is minimal, leading to the
 possibility of inlining.

+![general architecture](./dot/layers.png)
+
 However, even the fast path isn't the fastest, because it requires taking
 a per-partition lock. Although we optimized the lock, there was still room for
 improvement; to this end, we introduced the thread cache.
@@ -102,6 +104,13 @@ partition page that holds metadata (32B struct per partition page).
   diagram).
 * Gray fill denotes guard pages (one partition page each at the head and tail
   of each super page).
+* In some configurations, PartitionAlloc stores more metadata than can
+  fit in the one system page at the front. These are the bitmaps for
+  StarScan and `MTECheckedPtr<T>`, and they are relegated to the head of
+  what would otherwise be usable space for slot spans. One, both, or
+  none of these bitmaps may be present, depending on build
+  configuration, runtime configuration, and type of allocation.
+  See [`SuperPagePayloadBegin()`][payload-start] for details.

 As allocation requests arrive, there is eventually a need to allocate a new slot
 span.
@@ -178,3 +187,4 @@ list.
 [PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
 [SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
 [SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4
+[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454
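
The new bullet about the optional bitmaps boils down to a small address computation. A sketch under assumed constants; the real values live in partition_alloc_constants.h and the bitmap sizes here are placeholders, not the actual partition_page.h code:

```cpp
#include <cstdint>

// Illustrative values only: 2 MiB super page, 16 KiB partition page of
// guard/metadata at the front.
constexpr uintptr_t kSuperPageSize = 2 * 1024 * 1024;
constexpr uintptr_t kPartitionPageSize = 16 * 1024;

// Hypothetical bitmap sizes; zero whenever the corresponding bitmap is
// absent (build config, runtime config, or type of allocation).
uintptr_t StarScanBitmapSize(bool present) { return present ? 4096 : 0; }
uintptr_t MteCheckedPtrBitmapSize(bool present) { return present ? 4096 : 0; }

// Usable slot-span space starts after the metadata pages and whichever
// bitmaps exist, which is the role SuperPagePayloadBegin() plays.
uintptr_t PayloadBegin(uintptr_t super_page, bool star_scan, bool mte) {
  return super_page + kPartitionPageSize + StarScanBitmapSize(star_scan) +
         MteCheckedPtrBitmapSize(mte);
}
```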

View File

@@ -21,13 +21,6 @@
 #include "base/allocator/partition_allocator/partition_lock.h"
 #include "build/build_config.h"

-namespace base {
-
-template <typename Type>
-struct LazyInstanceTraitsBase;
-
-}  // namespace base

 namespace partition_alloc {

 class AddressSpaceStatsDumper;
@@ -166,8 +159,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager {
 #endif  // defined(PA_HAS_64_BITS_POINTERS)

   static AddressPoolManager singleton_;
-
-  friend struct base::LazyInstanceTraitsBase<AddressPoolManager>;
 };

 PA_ALWAYS_INLINE pool_handle GetRegularPool() {
@@ -185,13 +176,4 @@ PA_ALWAYS_INLINE pool_handle GetConfigurablePool() {
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-using ::partition_alloc::internal::AddressPoolManager;
-using ::partition_alloc::internal::GetBRPPool;
-using ::partition_alloc::internal::GetConfigurablePool;
-using ::partition_alloc::internal::GetRegularPool;
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_

View File

@@ -11,12 +11,4 @@ using pool_handle = unsigned;
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::pool_handle;
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_

View File

@@ -0,0 +1,23 @@
+digraph G {
+  graph[bgcolor=transparent]
+  node[shape=box,style="filled,rounded",color=deepskyblue]
+
+  subgraph cluster_tc {
+    label = "Thread Cache"
+    rankdir = LR
+    {rank=same;TLS1,TLS2,TLSn}
+    TLS1->TLS2[style=invisible,dir=none]
+    TLS2->TLSn[style=dotted,dir=none]
+  }
+
+  subgraph cluster_central {
+    label = "Central Allocator (per-partition lock)"
+    fast[label="slot span freelists (fast path)"]
+    slow[label="slot span management (slow path)"]
+    # Forces slow path node beneath fast path node.
+    fast->slow[style=invisible,dir=none]
+  }
+
+  # Forces thread-external subgraph beneath thread cache subgraph.
+  TLS2->fast[style=invisible,dir=none]
+}

Binary file not shown (new image, 11 KiB).
View File

@@ -21,6 +21,8 @@ digraph G {
         <TD BGCOLOR="darkgrey" HEIGHT="52"></TD>
         <TD PORT="metadata"></TD>
         <TD BGCOLOR="darkgrey" WIDTH="18"></TD>
+        <!-- Bitmaps -->
+        <TD WIDTH="100">Bitmaps(?)</TD>
         <!-- Several Slot Spans -->
         <TD PORT="red" BGCOLOR="crimson" WIDTH="119">3</TD>
         <TD PORT="green" BGCOLOR="palegreen" WIDTH="39">1</TD>
@@ -50,6 +52,8 @@ digraph G {
       <TR>
        <!-- Guard Page Metadata -->
         <TD BGCOLOR="darkgrey"> </TD>
+        <!-- Bitmaps Offset -->
+        <TD> B? </TD>
         <!-- Red Slot Span Metadata -->
         <TD BGCOLOR="crimson">v</TD>
         <TD BGCOLOR="crimson">+</TD>

Binary file not shown (image changed: 18 KiB before, 20 KiB after).

View File

@@ -71,12 +71,4 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) MemoryReclaimer {
 }  // namespace partition_alloc

-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::MemoryReclaimer;
-
-}  // namespace base

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_

View File

@@ -329,35 +329,4 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) size_t GetTotalMappedSize();
 }  // namespace partition_alloc

-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::AllocPages;
-using ::partition_alloc::AllocPagesWithAlignOffset;
-using ::partition_alloc::DecommitAndZeroSystemPages;
-using ::partition_alloc::DecommitSystemPages;
-using ::partition_alloc::DecommittedMemoryIsAlwaysZeroed;
-using ::partition_alloc::DiscardSystemPages;
-using ::partition_alloc::FreePages;
-using ::partition_alloc::GetAllocPageErrorCode;
-using ::partition_alloc::GetTotalMappedSize;
-using ::partition_alloc::HasReservationForTesting;
-using ::partition_alloc::NextAlignedWithOffset;
-using ::partition_alloc::PageAccessibilityConfiguration;
-using ::partition_alloc::PageAccessibilityDisposition;
-using ::partition_alloc::PageTag;
-using ::partition_alloc::RecommitSystemPages;
-using ::partition_alloc::ReleaseReservation;
-using ::partition_alloc::ReserveAddressSpace;
-using ::partition_alloc::RoundDownToPageAllocationGranularity;
-using ::partition_alloc::RoundDownToSystemPage;
-using ::partition_alloc::RoundUpToPageAllocationGranularity;
-using ::partition_alloc::RoundUpToSystemPage;
-using ::partition_alloc::SetSystemPagesAccess;
-using ::partition_alloc::TryRecommitSystemPages;
-using ::partition_alloc::TrySetSystemPagesAccess;
-
-}  // namespace base

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_

View File

@@ -42,8 +42,8 @@ namespace partition_alloc::internal {
 // Use PageAllocationGranularity(), PageAllocationGranularityShift()
 // to initialize and retrieve these values safely.
 struct PageCharacteristics {
-  std::atomic<int> size;
-  std::atomic<int> shift;
+  std::atomic<size_t> size;
+  std::atomic<size_t> shift;
 };
 extern PageCharacteristics page_characteristics;
@@ -78,13 +78,14 @@ PageAllocationGranularityShift() {
 #elif defined(_MIPS_ARCH_LOONGSON)
   return 14;  // 16kB
 #elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
-  return vm_page_shift;
+  return static_cast<size_t>(vm_page_shift);
 #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
   // arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
   // page sizes. Retrieve from or initialize cache.
-  int shift = page_characteristics.shift.load(std::memory_order_relaxed);
+  size_t shift = page_characteristics.shift.load(std::memory_order_relaxed);
   if (PA_UNLIKELY(shift == 0)) {
-    shift = __builtin_ctz((int)PageAllocationGranularity());
+    shift = static_cast<size_t>(
+        __builtin_ctz((unsigned int)PageAllocationGranularity()));
     page_characteristics.shift.store(shift, std::memory_order_relaxed);
   }
   return shift;
@@ -102,9 +103,9 @@ PageAllocationGranularity() {
 #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
   // arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
   // initialize cache.
-  int size = page_characteristics.size.load(std::memory_order_relaxed);
+  size_t size = page_characteristics.size.load(std::memory_order_relaxed);
   if (PA_UNLIKELY(size == 0)) {
-    size = getpagesize();
+    size = static_cast<size_t>(getpagesize());
     page_characteristics.size.store(size, std::memory_order_relaxed);
   }
   return size;
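
The pattern in this hunk, a relaxed atomic cache in front of a syscall, stands on its own. A sketch in plain C++ without the PA_ macros:

```cpp
#include <atomic>
#include <cstddef>
#include <unistd.h>  // getpagesize()

// The first call pays for the getpagesize() syscall; subsequent calls are a
// single relaxed atomic load. Relaxed ordering suffices because every thread
// that races here computes and stores the same value.
size_t CachedPageSize() {
  static std::atomic<size_t> cached{0};
  size_t size = cached.load(std::memory_order_relaxed);
  if (size == 0) {
    size = static_cast<size_t>(getpagesize());
    cached.store(size, std::memory_order_relaxed);
  }
  return size;
}
```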

View File

@@ -17,7 +17,6 @@
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/tagging.h"
 #include "build/build_config.h"

 #if BUILDFLAG(IS_IOS)
@@ -137,7 +136,7 @@ void PartitionAddressSpace::Init() {
   if (!setup_.regular_pool_base_address_)
     HandleGigaCageAllocFailure();
 #if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
-  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1) & kMemTagUnmask;
+  setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
 #endif
   PA_DCHECK(!(setup_.regular_pool_base_address_ & (regular_pool_size - 1)));
   setup_.regular_pool_ = AddressPoolManager::GetInstance().Add(
@@ -164,7 +163,7 @@ void PartitionAddressSpace::Init() {
     HandleGigaCageAllocFailure();
   setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
 #if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
-  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1) & kMemTagUnmask;
+  setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
 #endif
   PA_DCHECK(!(setup_.brp_pool_base_address_ & (brp_pool_size - 1)));
   setup_.brp_pool_ = AddressPoolManager::GetInstance().Add(

View File

@@ -61,7 +61,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
       uintptr_t address) {
-    address = ::partition_alloc::internal::UnmaskPtr(address);
     // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
 #if !BUILDFLAG(USE_BACKUP_REF_PTR)
     PA_DCHECK(!IsInBRPPool(address));
@@ -155,8 +154,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
   static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
     PA_DCHECK(IsInBRPPool(address));
-    return ::partition_alloc::internal::UnmaskPtr(address) -
-           setup_.brp_pool_base_address_;
+    return address - setup_.brp_pool_base_address_;
   }

   // PartitionAddressSpace is a static-only class.
@@ -227,19 +225,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
 #if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
   // Masks used to easily determine whether an address belongs to a pool.
-  // On Arm, the top byte of each pointer is ignored (meaning there are
-  // effectively 256 versions of each valid pointer). 4 bits are used to store
-  // tags for Arm's Memory Tagging Extension (MTE). To ensure that tagged
-  // pointers are recognized as being in the pool, mask off the top byte with
-  // kMemTagUnmask.
   static constexpr uintptr_t kRegularPoolOffsetMask =
       static_cast<uintptr_t>(kRegularPoolSize) - 1;
-  static constexpr uintptr_t kRegularPoolBaseMask =
-      ~kRegularPoolOffsetMask & kMemTagUnmask;
+  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
   static constexpr uintptr_t kBRPPoolOffsetMask =
       static_cast<uintptr_t>(kBRPPoolSize) - 1;
-  static constexpr uintptr_t kBRPPoolBaseMask =
-      ~kBRPPoolOffsetMask & kMemTagUnmask;
+  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
 #endif  // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

   // This must be set to such a value that IsIn*Pool() always returns false when
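
With MTE-tag stripping gone from the masks, pool membership is a plain bit test. A sketch with an illustrative pool size, not the real GigaCage values:

```cpp
#include <cstdint>

// Illustrative: a 16 GiB pool, aligned to its own size.
constexpr uintptr_t kPoolSize = uintptr_t{16} << 30;
constexpr uintptr_t kPoolOffsetMask = kPoolSize - 1;
constexpr uintptr_t kPoolBaseMask = ~kPoolOffsetMask;

// After this change the address is compared as-is; previously the top
// (MTE tag) byte first had to be masked off with kMemTagUnmask.
bool IsInPool(uintptr_t address, uintptr_t pool_base) {
  return (address & kPoolBaseMask) == pool_base;
}

uintptr_t OffsetInPool(uintptr_t address) {
  return address & kPoolOffsetMask;
}
```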

View File

@@ -12,6 +12,7 @@
 #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
 #include "base/allocator/partition_allocator/partition_ref_count.h"
 #include "base/allocator/partition_allocator/random.h"
+#include "base/allocator/partition_allocator/tagging.h"
 #include "build/build_config.h"

 // Prefetch *x into memory.
@@ -53,7 +54,7 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
   static thread_local uint8_t counter = 0;
   if (PA_UNLIKELY(counter == 0)) {
     // It's OK to truncate this value.
-    counter = static_cast<uint8_t>(::partition_alloc::internal::RandomValue());
+    counter = static_cast<uint8_t>(RandomValue());
   }
   // If `counter` is 0, this will wrap. That is intentional and OK.
   counter--;
@@ -61,17 +62,22 @@ PA_ALWAYS_INLINE bool RandomPeriod() {
 }
 #endif  // !BUILDFLAG(PA_DCHECK_IS_ON)

+PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(const void* ptr) {
+  return UntagPtr(ptr);
+}
+PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(const void* object) {
+  // TODO(bartekn): Check that |object| is indeed an object start.
+  return ObjectInnerPtr2Addr(object);
+}
+PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
+  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+  return TagAddr(slot_start);
+}
+PA_ALWAYS_INLINE uintptr_t SlotStartPtr2Addr(const void* slot_start) {
+  // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+  return UntagPtr(slot_start);
+}
+
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::SecureMemset;
-#if !BUILDFLAG(PA_DCHECK_IS_ON)
-using ::partition_alloc::internal::RandomPeriod;
-#endif  // !BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_

View File

@@ -70,6 +70,29 @@ void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
       internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
       "System pages per slot span must be no greater than 16.");

+#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+  STATIC_ASSERT_OR_PA_CHECK(
+      internal::GetPartitionRefCountIndexMultiplierShift() <
+          std::numeric_limits<size_t>::max() / 2,
+      "Calculation in GetPartitionRefCountIndexMultiplierShift() must not "
+      "underflow.");
+  // Check that the GetPartitionRefCountIndexMultiplierShift() calculation is
+  // correct.
+  STATIC_ASSERT_OR_PA_CHECK(
+      (1 << internal::GetPartitionRefCountIndexMultiplierShift()) ==
+          (internal::SystemPageSize() /
+           (sizeof(internal::PartitionRefCount) *
+            (internal::kSuperPageSize / internal::SystemPageSize()))),
+      "Bitshift must match the intended multiplication.");
+  STATIC_ASSERT_OR_PA_CHECK(
+      ((sizeof(internal::PartitionRefCount) *
+        (internal::kSuperPageSize / internal::SystemPageSize()))
+       << internal::GetPartitionRefCountIndexMultiplierShift()) <=
+          internal::SystemPageSize(),
+      "PartitionRefCount bitmap size must be smaller than or equal to "
+      "SystemPageSize().");
+#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
+
   PA_DCHECK(on_out_of_memory);
   internal::g_oom_handling_function = on_out_of_memory;
 }
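
Plugging typical values into the new assertions makes the arithmetic concrete. The ref-count size below is an assumption for illustration, not taken from the source:

```cpp
#include <cstddef>

// Assumed values: 4 KiB system pages, 2 MiB super pages, 4-byte ref count.
constexpr size_t kSystemPageSize = 4096;
constexpr size_t kSuperPageSize = 2 * 1024 * 1024;
constexpr size_t kRefCountSize = 4;  // sizeof(PartitionRefCount), assumed

// One ref count per system page of a super page: 512 entries * 4 B = 2 KiB.
constexpr size_t kTableSize =
    kRefCountSize * (kSuperPageSize / kSystemPageSize);

// The multiplier shift must map the table back onto one system page.
constexpr size_t kShift = 1;  // because 2 KiB << 1 == 4 KiB
static_assert((1 << kShift) == kSystemPageSize / kTableSize,
              "Bitshift matches the intended multiplication.");
static_assert((kTableSize << kShift) <= kSystemPageSize,
              "Ref-count table fits in one system page.");
```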

View File

@@ -11,6 +11,36 @@
 #include "base/allocator/partition_allocator/partition_oom.h"
 #include "base/allocator/partition_allocator/partition_root.h"

+// *** HOUSEKEEPING RULES ***
+//
+// Throughout PartitionAlloc code, we avoid using generic variable names like
+// |ptr| or |address|, and prefer names like |object| or |slot_start| instead.
+// This helps emphasize that terms like "object" and "slot" represent two
+// different worlds. "Slot" is an indivisible allocation unit, internal to
+// PartitionAlloc. It is generally represented as an address (uintptr_t), since
+// arithmetic operations on it aren't uncommon, and for that reason it isn't
+// MTE-tagged either. "Object" is the allocated memory that the app is given
+// via interfaces like Alloc(), Free(), etc. An object is fully contained
+// within a slot, and may be surrounded by internal PartitionAlloc structures
+// or empty space. It is generally represented as a pointer to its beginning
+// (most commonly void*), and is MTE-tagged so it's safe to access.
+//
+// The best way to transition between these two worlds is via
+// PartitionRoot::ObjectToSlotStart() and ::SlotStartToObject(). These take
+// care of shifting between slot/object start, MTE-tagging/untagging, and the
+// cast for you. There are cases where these functions are insufficient.
+// Internal PartitionAlloc structures, like free-list pointers, BRP ref-count,
+// cookie, etc. are located in-slot, thus accessing them requires an MTE tag.
+// SlotStartPtr2Addr() and SlotStartAddr2Ptr() take care of this.
+// There are cases where we have to do pointer arithmetic on an object pointer
+// (like checking whether it belongs to a pool, etc.), in which case we want
+// to strip the MTE tag. ObjectInnerPtr2Addr() and ObjectPtr2Addr() take care
+// of that.
+//
+// Avoid using UntagPtr/Addr() and TagPtr/Addr() directly, if possible. And
+// definitely avoid using reinterpret_cast between uintptr_t and pointer
+// worlds. When you do, add a comment explaining why it's safe from the point
+// of view of MTE tagging.

 namespace partition_alloc {

 PA_COMPONENT_EXPORT(PARTITION_ALLOC)
@@ -43,14 +73,4 @@ using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
 }  // namespace partition_alloc

-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::PartitionAllocator;
-using ::partition_alloc::PartitionAllocGlobalInit;
-using ::partition_alloc::PartitionAllocGlobalUninitForTesting;
-
-}  // namespace base

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
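
A sketch of the two-world discipline the housekeeping block prescribes; the helper itself is hypothetical, while the conversion functions are the ones added in partition_alloc-inl.h above:

```cpp
#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc-inl.h"

// Hypothetical internal routine: start from an app-facing (MTE-tagged)
// object pointer, drop to the untagged address world for arithmetic, then
// re-enter the tagged pointer world to touch in-slot metadata.
void* ObjectToInSlotMetadata(void* object, size_t offset_to_slot_start) {
  // Object world -> address world (strips the MTE tag).
  uintptr_t object_addr = partition_alloc::internal::ObjectPtr2Addr(object);
  uintptr_t slot_start = object_addr - offset_to_slot_start;
  // Address world -> slot world (re-tags, making the memory safe to access).
  return partition_alloc::internal::SlotStartAddr2Ptr(slot_start);
}
```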

View File

@@ -17,10 +17,6 @@
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "build/build_config.h"

-#if defined(COMPILER_MSVC)
-#include <intrin.h>
-#endif

 namespace partition_alloc::internal::base::bits {

 // Returns true iff |value| is a power of 2.
@@ -75,85 +71,6 @@ inline T* AlignUp(T* ptr, size_t alignment) {
 //
 // C does not have an operator to do this, but fortunately the various
 // compilers have built-ins that map to fast underlying processor instructions.
-//
-// Prefer the clang path on Windows, as _BitScanReverse() and friends are not
-// constexpr.
-#if defined(COMPILER_MSVC) && !defined(__clang__)
-
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
-                            int>::type
-    CountLeadingZeroBits(T x) {
-  static_assert(bits > 0, "invalid instantiation");
-  unsigned long index;
-  return PA_LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
-             ? (31 - index - (32 - bits))
-             : bits;
-}
-
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
-                            int>::type
-    CountLeadingZeroBits(T x) {
-  static_assert(bits > 0, "invalid instantiation");
-  unsigned long index;
-// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
-#if defined(ARCH_CPU_64_BITS)
-  return PA_LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
-             ? (63 - index)
-             : 64;
-#else
-  uint32_t left = static_cast<uint32_t>(x >> 32);
-  if (PA_LIKELY(_BitScanReverse(&index, left)))
-    return 31 - index;
-
-  uint32_t right = static_cast<uint32_t>(x);
-  if (PA_LIKELY(_BitScanReverse(&index, right)))
-    return 63 - index;
-
-  return 64;
-#endif
-}
-
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
-                            int>::type
-    CountTrailingZeroBits(T x) {
-  static_assert(bits > 0, "invalid instantiation");
-  unsigned long index;
-  return PA_LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
-                                                                      : bits;
-}
-
-template <typename T, int bits = sizeof(T) * 8>
-PA_ALWAYS_INLINE
-    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
-                            int>::type
-    CountTrailingZeroBits(T x) {
-  static_assert(bits > 0, "invalid instantiation");
-  unsigned long index;
-// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
-#if defined(ARCH_CPU_64_BITS)
-  return PA_LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
-                                                                        : 64;
-#else
-  uint32_t right = static_cast<uint32_t>(x);
-  if (PA_LIKELY(_BitScanForward(&index, right)))
-    return index;
-
-  uint32_t left = static_cast<uint32_t>(x >> 32);
-  if (PA_LIKELY(_BitScanForward(&index, left)))
-    return 32 + index;
-
-  return 64;
-#endif
-}
-
-#elif defined(COMPILER_GCC) || defined(__clang__)
-
 // __builtin_clz has undefined behaviour for an input of 0, even though there's
 // clearly a return value that makes sense, and even though some processor clz
 // instructions have defined behaviour for 0. We could drop to raw __asm__ to
@@ -182,8 +99,6 @@ PA_ALWAYS_INLINE constexpr
              : bits;
 }

-#endif
-
 // Returns the integer i such that 2^i <= n < 2^(i+1).
 //
 // There is a common `BitLength` function, which returns the number of bits
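
Only the builtin-based path survives this change; the zero-input guard it relies on can be shown in isolation:

```cpp
#include <cstdint>

// __builtin_clz is undefined for 0, so a correct constexpr wrapper
// special-cases it, as the remaining GCC/clang path in bits.h does.
constexpr int CountLeadingZeroBits32(uint32_t x) {
  return x ? __builtin_clz(x) : 32;
}

static_assert(CountLeadingZeroBits32(0) == 32, "defined for zero input");
static_assert(CountLeadingZeroBits32(1) == 31, "");
static_assert(CountLeadingZeroBits32(0x80000000u) == 0, "");
```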

View File

@@ -10,7 +10,7 @@
 // on build time. Try not to raise this limit unless necessary. See
 // https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
 #ifndef NACL_TC_REV
-#pragma clang max_tokens_here 340000
+#pragma clang max_tokens_here 370000
 #endif

 #include <string.h>

View File

@@ -9,12 +9,6 @@ namespace base {
 class LapTimer;
-
-template <typename Type, typename Traits>
-class LazyInstance;
-
-template <typename Type>
-struct LazyInstanceTraitsBase;

 }  // namespace base

 namespace partition_alloc::internal::base {
@@ -22,8 +16,6 @@ namespace partition_alloc::internal::base {
 // TODO(https://crbug.com/1288247): Remove these 'using' declarations once
 // the migration to the new namespaces gets done.
 using ::base::LapTimer;
-using ::base::LazyInstance;
-using ::base::LazyInstanceTraitsBase;

 }  // namespace partition_alloc::internal::base

View File

@@ -23,7 +23,7 @@
 namespace partition_alloc::internal::base {

 // Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
-inline uint16_t ByteSwap(uint16_t x) {
+inline constexpr uint16_t ByteSwap(uint16_t x) {
 #if defined(COMPILER_MSVC) && !defined(__clang__)
   return _byteswap_ushort(x);
 #else
@@ -31,7 +31,7 @@ inline uint16_t ByteSwap(uint16_t x) {
 #endif
 }

-inline uint32_t ByteSwap(uint32_t x) {
+inline constexpr uint32_t ByteSwap(uint32_t x) {
 #if defined(COMPILER_MSVC) && !defined(__clang__)
   return _byteswap_ulong(x);
 #else
@@ -69,21 +69,21 @@ inline constexpr uintptr_t ByteSwapUintPtrT(uintptr_t x) {
 // Converts the bytes in |x| from host order (endianness) to little endian, and
 // returns the result.
-inline uint16_t ByteSwapToLE16(uint16_t x) {
+inline constexpr uint16_t ByteSwapToLE16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
   return ByteSwap(x);
 #endif
 }
-inline uint32_t ByteSwapToLE32(uint32_t x) {
+inline constexpr uint32_t ByteSwapToLE32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
   return ByteSwap(x);
 #endif
 }
-inline uint64_t ByteSwapToLE64(uint64_t x) {
+inline constexpr uint64_t ByteSwapToLE64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return x;
 #else
@@ -93,21 +93,21 @@ inline uint64_t ByteSwapToLE64(uint64_t x) {
 // Converts the bytes in |x| from network to host order (endianness), and
 // returns the result.
-inline uint16_t NetToHost16(uint16_t x) {
+inline constexpr uint16_t NetToHost16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint32_t NetToHost32(uint32_t x) {
+inline constexpr uint32_t NetToHost32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint64_t NetToHost64(uint64_t x) {
+inline constexpr uint64_t NetToHost64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
@@ -117,21 +117,21 @@ inline uint64_t NetToHost64(uint64_t x) {
 // Converts the bytes in |x| from host to network order (endianness), and
 // returns the result.
-inline uint16_t HostToNet16(uint16_t x) {
+inline constexpr uint16_t HostToNet16(uint16_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint32_t HostToNet32(uint32_t x) {
+inline constexpr uint32_t HostToNet32(uint32_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
   return x;
 #endif
 }
-inline uint64_t HostToNet64(uint64_t x) {
+inline constexpr uint64_t HostToNet64(uint64_t x) {
 #if defined(ARCH_CPU_LITTLE_ENDIAN)
   return ByteSwap(x);
 #else
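
Marking these helpers constexpr means byte-order conversions can now happen at compile time; for example:

```cpp
#include <cstdint>

// Assumes the constexpr NetToHost32()/HostToNet32() declared above.
constexpr uint32_t kWireMagic = 0x01020304u;              // network byte order
constexpr uint32_t kHostMagic = NetToHost32(kWireMagic);  // host byte order
static_assert(HostToNet32(kHostMagic) == kWireMagic, "round-trips cleanly");
```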

View File

@@ -12,6 +12,7 @@
 #include <sys/time.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <memory>

 #include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
 #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h"

View File

@@ -10,7 +10,7 @@
 // time.h is a widely included header and its size impacts build time.
 // Try not to raise this limit unless necessary. See
 // https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
-#pragma clang max_tokens_here 390000
+#pragma clang max_tokens_here 490000
 #endif  // BUILDFLAG(IS_LINUX)

 #include <atomic>

View File

@@ -327,7 +327,7 @@ DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
 // "rollover" counter.
 union LastTimeAndRolloversState {
   // The state as a single 32-bit opaque value.
-  std::atomic<int32_t> as_opaque_32;
+  std::atomic<int32_t> as_opaque_32{0};

   // The state as usable values.
   struct {
View File

@@ -27,35 +27,30 @@ namespace partition_alloc {
 // Bit flag constants used as `flag` argument of PartitionRoot::AllocWithFlags,
 // AlignedAllocWithFlags, etc.
 struct AllocFlags {
-  // In order to support bit operations like `flag_a | flag_b`, the old-
-  // fashioned enum (+ surrounding named struct) is used instead of enum class.
-  enum : unsigned int {
-    kReturnNull = 1 << 0,
-    kZeroFill = 1 << 1,
+  static constexpr unsigned int kReturnNull = 1 << 0;
+  static constexpr unsigned int kZeroFill = 1 << 1;
   // Don't allow allocation override hooks. Override hooks are expected to
   // check for the presence of this flag and return false if it is active.
-    kNoOverrideHooks = 1 << 2,
+  static constexpr unsigned int kNoOverrideHooks = 1 << 2;
   // Never let a memory tool like ASan (if active) perform the allocation.
-    kNoMemoryToolOverride = 1 << 3,
+  static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
   // Don't allow any hooks (override or observers).
-    kNoHooks = 1 << 4,  // Internal only.
+  static constexpr unsigned int kNoHooks = 1 << 4;  // Internal.
   // If the allocation requires a "slow path" (such as allocating/committing a
   // new slot span), return nullptr instead. Note this makes all large
   // allocations return nullptr, such as direct-mapped ones, and even for
   // smaller ones, a nullptr value is common.
-    kFastPathOrReturnNull = 1 << 5,  // Internal only.
-
-    kLastFlag = kFastPathOrReturnNull
-  };
+  static constexpr unsigned int kFastPathOrReturnNull = 1 << 5;  // Internal.
+
+  static constexpr unsigned int kLastFlag = kFastPathOrReturnNull;
 };

 // Bit flag constants used as `flag` argument of PartitionRoot::FreeWithFlags.
 struct FreeFlags {
-  enum : unsigned int {
-    kNoMemoryToolOverride = 1 << 0,  // See AllocFlags::kNoMemoryToolOverride.
-
-    kLastFlag = kNoMemoryToolOverride
-  };
+  // See AllocFlags::kNoMemoryToolOverride.
+  static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;
+
+  static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
 };

 namespace internal {
@@ -245,7 +240,7 @@ constexpr size_t kSuperPageShift = 21;  // 2 MiB
 constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
 constexpr size_t kSuperPageAlignment = kSuperPageSize;
 constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
-constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask & kMemTagUnmask;
+constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;

 // GigaCage is split into two pools, one which supports BackupRefPtr (BRP) and
 // one that doesn't.
@@ -283,14 +278,14 @@ static constexpr pool_handle kConfigurablePoolHandle = 3;
 constexpr size_t kMaxMemoryTaggingSize = 1024;

 #if defined(PA_HAS_MEMORY_TAGGING)
-// Returns whether the tag of |object| overflowed and the containing slot needs
-// to be moved to quarantine.
+// Returns whether the tag of |object| overflowed, meaning the containing slot
+// needs to be moved to quarantine.
 PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
   // The tag with which the slot is put to quarantine.
   constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
-  static_assert((kOverflowTag & ~kMemTagUnmask) != 0,
+  static_assert((kOverflowTag & kPtrTagMask) != 0,
                 "Overflow tag must be in tag bits");
-  return (reinterpret_cast<uintptr_t>(object) & ~kMemTagUnmask) == kOverflowTag;
+  return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
 }
 #endif  // defined(PA_HAS_MEMORY_TAGGING)
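
Because the constants remain plain `unsigned int` values, call sites keep combining them with `|`; an illustrative use, where the root pointer and type name are stand-ins:

```cpp
#include "base/allocator/partition_allocator/partition_alloc.h"

// Hypothetical call site: request zeroed memory and get nullptr on failure
// instead of an OOM crash.
void* AllocZeroedOrNull(partition_alloc::ThreadSafePartitionRoot* root,
                        size_t size) {
  return root->AllocWithFlags(partition_alloc::AllocFlags::kReturnNull |
                                  partition_alloc::AllocFlags::kZeroFill,
                              size, "IllustrativeType");
}
```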

View File

@@ -25,16 +25,9 @@ namespace internal {
 // the second one 16. We could technically return something different for
 // malloc() and operator new(), but this would complicate things, and most of
 // our allocations are presumably coming from operator new() anyway.
-//
-// __STDCPP_DEFAULT_NEW_ALIGNMENT__ is C++17. As such, it is not defined on all
-// platforms, as Chrome's requirement is C++14 as of 2020.
-#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
 constexpr size_t kAlignment =
     std::max(alignof(max_align_t),
              static_cast<size_t>(__STDCPP_DEFAULT_NEW_ALIGNMENT__));
-#else
-constexpr size_t kAlignment = alignof(max_align_t);
-#endif
 static_assert(kAlignment <= 16,
               "PartitionAlloc doesn't support a fundamental alignment larger "
               "than 16 bytes.");
@@ -62,14 +55,6 @@ using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
 }  // namespace partition_alloc

-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::PartitionRoot;
-
-}  // namespace base

 // From https://clang.llvm.org/docs/AttributeReference.html#malloc:
 //
 // The malloc attribute indicates that the function acts like a system memory

View File

@@ -82,12 +82,4 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
 }  // namespace partition_alloc

-namespace base {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::PartitionAllocHooks;
-
-}  // namespace base

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_HOOKS_H_

View File

@@ -803,7 +803,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
         PageAccessibilityConfiguration::kReadWrite,
         PageAccessibilityDisposition::kRequireUpdate);
   }
-    ::base::internal::PCScan::RegisterNewSuperPage(root, super_page);
+    PCScan::RegisterNewSuperPage(root, super_page);
   }

   return payload;
@@ -883,9 +883,8 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
   }
   if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
-    // Ensure the memory tag of the return_slot is unguessable.
-    return_slot =
-        ::partition_alloc::internal::TagMemoryRangeRandomly(return_slot, size);
+    // Ensure the MTE-tag of the memory pointed to by |return_slot| is
+    // unguessable.
+    return_slot = TagMemoryRangeRandomly(return_slot, size);
   }
 #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
   PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
@@ -896,14 +895,20 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
   uintptr_t next_slot_end = next_slot + size;
   size_t free_list_entries_added = 0;
   while (next_slot_end <= commit_end) {
+    void* next_slot_ptr;
     if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
-      next_slot =
-          ::partition_alloc::internal::TagMemoryRangeRandomly(next_slot, size);
+      // Ensure the MTE-tags of the other provisioned slots are unguessable.
+      // They will be returned to the app as-is, and the MTE-tag will only
+      // change upon calling Free().
+      next_slot_ptr = TagMemoryRangeRandomly(next_slot, size);
+    } else {
+      // No MTE-tagging for larger slots, just cast.
+      next_slot_ptr = reinterpret_cast<void*>(next_slot);
     }
 #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
     PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
 #endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
-    auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot);
+    auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
     if (!slot_span->get_freelist_head()) {
       PA_DCHECK(!prev_entry);
       PA_DCHECK(!free_list_entries_added);
View File

@@ -202,12 +202,4 @@ struct PartitionBucket {
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::PartitionBucket;
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_

View File

@@ -270,12 +270,4 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::BucketIndexLookup;
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_

View File

@@ -44,18 +44,4 @@ PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {}
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::kCookieSize;
-using ::partition_alloc::internal::kPartitionCookieSizeAdjustment;
-using ::partition_alloc::internal::PartitionCookieCheckValue;
-using ::partition_alloc::internal::PartitionCookieWriteValue;
-
-#if BUILDFLAG(PA_DCHECK_IS_ON)
-using ::partition_alloc::internal::kCookieValue;
-#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_

View File

@@ -72,13 +72,4 @@ PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
 }  // namespace partition_alloc::internal

-namespace base::internal {
-
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::PartitionDirectMapExtent;
-using ::partition_alloc::internal::PartitionDirectMapMetadata;
-
-}  // namespace base::internal

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_

View File

@ -41,6 +41,7 @@ class EncodedPartitionFreelistEntryPtr {
std::nullptr_t) std::nullptr_t)
: encoded_(Transform(0)) {} : encoded_(Transform(0)) {}
explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr) explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
// The encoded pointer stays MTE-tagged.
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {} : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const { PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
@@ -117,11 +118,15 @@ class PartitionFreelistEntry {
   // Emplaces the freelist entry at the beginning of the given slot span, and
   // initializes it as null-terminated.
   static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
-      uintptr_t slot_start) {
-    auto* entry = new (reinterpret_cast<void*>(slot_start))
-        PartitionFreelistEntry(nullptr);
+      void* slot_start_tagged) {
+    // |slot_start_tagged| is MTE-tagged.
+    auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
     return entry;
   }
+  static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
+      uintptr_t slot_start) {
+    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
+  }
   // Emplaces the freelist entry at the beginning of the given slot span, and
   // initializes it with the given |next| pointer, but encoded.
@@ -133,7 +138,7 @@ class PartitionFreelistEntry {
       uintptr_t slot_start,
       PartitionFreelistEntry* next) {
     auto* entry =
-        new (reinterpret_cast<void*>(slot_start)) PartitionFreelistEntry(next);
+        new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
     return entry;
   }
@@ -145,7 +150,7 @@ class PartitionFreelistEntry {
   static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
                                                      void* next,
                                                      bool make_shadow_match) {
-    new (reinterpret_cast<void*>(slot_start))
+    new (SlotStartAddr2Ptr(slot_start))
         PartitionFreelistEntry(next, make_shadow_match);
   }
@@ -174,7 +179,7 @@ class PartitionFreelistEntry {
     }
   }
-  PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
+  PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
     // SetNext() is either called on the freelist head, when provisioning new
     // slots, or when GetNext() has been called before, no need to pass the
     // size.
@@ -182,15 +187,14 @@ class PartitionFreelistEntry {
     // Regular freelists always point to an entry within the same super page.
     //
     // This is most likely a PartitionAlloc bug if this triggers.
-    if (PA_UNLIKELY(
-            ptr &&
-            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
-                (reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
+    if (PA_UNLIKELY(entry &&
+                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
+                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
       FreelistCorruptionDetected(0);
     }
 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
-    encoded_next_ = EncodedPartitionFreelistEntryPtr(ptr);
+    encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
 #if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
     shadow_ = encoded_next_.Inverted();
 #endif
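The check above works because masking an address with kSuperPageBaseMask zeroes the offset bits and leaves only the super-page base. A worked example, assuming 2 MiB super pages:

#include <cstdint>

int main() {
  constexpr uintptr_t kSuperPageSize = uintptr_t{1} << 21;  // assumed 2 MiB
  constexpr uintptr_t kSuperPageBaseMask = ~(kSuperPageSize - 1);
  uintptr_t a = 0x200040;  // offset 0x40 into the super page at 0x200000
  uintptr_t b = 0x200800;  // same super page
  uintptr_t c = 0x400040;  // neighboring super page
  bool same_ab = (a & kSuperPageBaseMask) == (b & kSuperPageBaseMask);  // true
  bool same_ac = (a & kSuperPageBaseMask) == (c & kSuperPageBaseMask);  // false
  return (same_ab && !same_ac) ? 0 : 1;
}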
@@ -204,8 +208,7 @@ class PartitionFreelistEntry {
 #if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
     shadow_ = 0;
 #endif
-    uintptr_t slot_start = reinterpret_cast<uintptr_t>(this);
-    return slot_start;
+    return SlotStartPtr2Addr(this);
   }
   PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
@@ -229,8 +232,8 @@ class PartitionFreelistEntry {
   //
   // Also, the lightweight UaF detection (pointer shadow) is checked.
-  uintptr_t here_address = reinterpret_cast<uintptr_t>(here);
-  uintptr_t next_address = reinterpret_cast<uintptr_t>(next);
+  uintptr_t here_address = SlotStartPtr2Addr(here);
+  uintptr_t next_address = SlotStartPtr2Addr(next);
 #if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
   bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
@@ -327,12 +330,4 @@ PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
 }  // namespace partition_alloc::internal
-namespace base::internal {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::PartitionFreelistEntry;
-}  // namespace base::internal
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
@@ -37,24 +37,4 @@ extern OomFunction g_oom_handling_function;
 }  // namespace partition_alloc
-namespace base {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::OomFunction;
-namespace internal {
-using ::partition_alloc::internal::g_oom_handling_function;
-using ::partition_alloc::internal::PartitionExcessiveAllocationSize;
-#if !defined(ARCH_CPU_64_BITS)
-using ::partition_alloc::internal::PartitionOutOfMemoryWithLargeVirtualSize;
-using ::partition_alloc::internal::
-    PartitionOutOfMemoryWithLotsOfUncommitedPages;
-#endif
-}  // namespace internal
-}  // namespace base
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
@@ -263,9 +263,7 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
   for (PartitionFreelistEntry* head = freelist_head; head;
        head = head->GetNext(slot_size)) {
     ++num_free_slots;
-    size_t offset_in_slot_span = ::partition_alloc::internal::UnmaskPtr(
-                                     reinterpret_cast<uintptr_t>(head)) -
-                                 slot_span_start;
+    size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
     size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
     PA_DCHECK(slot_number < num_provisioned_slots);
     free_slots[slot_number] = true;
@@ -280,9 +278,8 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
   for (size_t slot_number = 0; slot_number < num_provisioned_slots;
        slot_number++) {
     if (free_slots[slot_number]) {
-      uintptr_t slot_address = ::partition_alloc::internal::RemaskPtr(
-          slot_span_start + (slot_size * slot_number));
-      auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_address);
+      uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
+      auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
       if (!head)
         head = entry;
@@ -502,7 +502,6 @@ SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
 // surely never contain user data.
 PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
                                                bool with_quarantine) {
-  address = ::partition_alloc::internal::UnmaskPtr(address);
   // Quarantine can only be enabled for normal buckets in the current code.
   PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
   uintptr_t super_page = address & kSuperPageBaseMask;
@@ -581,7 +580,6 @@ PA_ALWAYS_INLINE uintptr_t SlotSpanMetadata<thread_safe>::ToSlotSpanStart(
 template <bool thread_safe>
 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
 SlotSpanMetadata<thread_safe>::FromAddr(uintptr_t address) {
-  address = ::partition_alloc::internal::UnmaskPtr(address);
   auto* page = PartitionPage<thread_safe>::FromAddr(address);
   PA_DCHECK(page->is_valid);
   // Partition pages in the same slot span share the same SlotSpanMetadata
@@ -611,9 +609,7 @@ SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
 #if BUILDFLAG(PA_DCHECK_IS_ON)
   // Checks that the pointer is a multiple of slot size.
   uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
-  PA_DCHECK(!((::partition_alloc::internal::UnmaskPtr(slot_start) -
-               ::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
-              slot_span->bucket->slot_size));
+  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
   return slot_span;
 }
@@ -625,16 +621,14 @@ SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
 template <bool thread_safe>
 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
 SlotSpanMetadata<thread_safe>::FromObject(void* object) {
-  uintptr_t object_addr = PartitionRoot<thread_safe>::ObjectPtr2Addr(object);
+  uintptr_t object_addr = ObjectPtr2Addr(object);
   auto* slot_span = FromAddr(object_addr);
 #if BUILDFLAG(PA_DCHECK_IS_ON)
   // Checks that the object is exactly |extras_offset| away from a multiple of
   // slot size (i.e. from a slot start).
   uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
-  PA_DCHECK((::partition_alloc::internal::UnmaskPtr(object_addr) -
-             ::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
-                slot_span->bucket->slot_size ==
+  PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
             root->flags.extras_offset);
 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
   return slot_span;
@@ -665,8 +659,7 @@ SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
 template <bool thread_safe>
 PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
 SlotSpanMetadata<thread_safe>::FromObjectInnerPtr(void* ptr) {
-  return FromObjectInnerAddr(
-      PartitionRoot<thread_safe>::ObjectInnerPtr2Addr(ptr));
+  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
 }
 template <bool thread_safe>
@@ -688,9 +681,14 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
 template <bool thread_safe>
 PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
     PartitionFreelistEntry* new_head) {
+#if BUILDFLAG(PA_DCHECK_IS_ON)
+  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
+  // as well.
+  uintptr_t new_head_untagged = UntagPtr(new_head);
   PA_DCHECK(!new_head ||
             (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
-                (reinterpret_cast<uintptr_t>(new_head) & kSuperPageBaseMask));
+                (new_head_untagged & kSuperPageBaseMask));
+#endif
   freelist_head = new_head;
   // Inserted something new in the freelist, assume that it is not sorted
   // anymore.
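The untagging matters here because |this| lives in the never-tagged metadata region while |new_head| points into the (possibly tagged) slot area. A toy model of the idea, assuming an AArch64-style scheme that keeps the tag in the top byte of the address; the real TagAddr()/UntagPtr() helpers live elsewhere in PartitionAlloc:

#include <cstdint>

// Toy model only: assume the tag occupies the top byte of a 64-bit address.
constexpr uintptr_t kTagMask = uintptr_t{0xFF} << 56;

uintptr_t Untag(uintptr_t addr) {
  return addr & ~kTagMask;
}

int main() {
  uintptr_t untagged = 0x200040;
  uintptr_t tagged = untagged | (uintptr_t{0x3} << 56);  // same slot, tag 0x3
  // Raw comparison differs; comparison after untagging matches, which is why
  // SetFreelistHead() untags before comparing super-page bases.
  return (tagged != untagged && Untag(tagged) == untagged) ? 0 : 1;
}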
@@ -720,7 +718,8 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(uintptr_t slot_start)
   root->lock_.AssertAcquired();
 #endif
-  auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(slot_start);
+  auto* entry = static_cast<internal::PartitionFreelistEntry*>(
+      SlotStartAddr2Ptr(slot_start));
   // Catches an immediate double free.
   PA_CHECK(entry != freelist_head);
   // Look for double free one level deeper in debug.
@@ -762,11 +761,10 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
     size_t number_of_entries = 0;
     for (auto* entry = head; entry;
          entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
-      uintptr_t unmasked_entry = ::partition_alloc::internal::UnmaskPtr(
-          reinterpret_cast<uintptr_t>(entry));
+      uintptr_t untagged_entry = UntagPtr(entry);
       // Check that all entries belong to this slot span.
-      PA_DCHECK(ToSlotSpanStart(this) <= unmasked_entry);
-      PA_DCHECK(unmasked_entry <
+      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
+      PA_DCHECK(untagged_entry <
                 ToSlotSpanStart(this) + bucket->get_bytes_per_span());
     }
     PA_DCHECK(number_of_entries == number_of_freed);
@@ -902,23 +900,4 @@ void IterateSlotSpans(uintptr_t super_page,
 }  // namespace partition_alloc::internal
-namespace base::internal {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::AllocationStateMap;
-using ::partition_alloc::internal::CommittedStateBitmapSize;
-using ::partition_alloc::internal::IterateSlotSpans;
-using ::partition_alloc::internal::PartitionPage;
-using ::partition_alloc::internal::PartitionSuperPageExtentEntry;
-using ::partition_alloc::internal::PartitionSuperPageToExtent;
-using ::partition_alloc::internal::PartitionSuperPageToMetadataArea;
-using ::partition_alloc::internal::ReservedStateBitmapSize;
-using ::partition_alloc::internal::SlotSpanMetadata;
-using ::partition_alloc::internal::StateBitmapFromAddr;
-using ::partition_alloc::internal::SuperPageStateBitmap;
-using ::partition_alloc::internal::SuperPageStateBitmapAddr;
-}  // namespace base::internal
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
@@ -319,19 +319,49 @@ static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
 constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
 constexpr size_t kPartitionPastAllocationAdjustment = 0;
-constexpr size_t kPartitionRefCountIndexMultiplier =
-    SystemPageSize() /
-    (sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()));
-static_assert((sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize()) *
-               kPartitionRefCountIndexMultiplier <=
-               SystemPageSize()),
-              "PartitionRefCount Bitmap size must be smaller than or equal to "
-              "SystemPageSize().");
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+#if defined(PA_REF_COUNT_CHECK_COOKIE) || \
+    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 4;
+#else   // defined(PA_REF_COUNT_CHECK_COOKIE) ||
+        // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 3;
+#endif  // defined(PA_REF_COUNT_CHECK_COOKIE) ||
+        // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+#else   // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
+#if defined(PA_REF_COUNT_CHECK_COOKIE) && \
+    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 4;
+#elif defined(PA_REF_COUNT_CHECK_COOKIE) || \
+    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+static constexpr size_t kPartitionRefCountSizeShift = 3;
+#else
+static constexpr size_t kPartitionRefCountSizeShift = 2;
+#endif
+#endif  // defined(PA_REF_COUNT_CHECK_COOKIE)
+static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
+
+// We need one PartitionRefCount for each system page in a super page. They
+// take `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())`
+// space. They need to fit into a system page of metadata as sparsely as
+// possible to minimize cache line sharing, hence we calculate a multiplier as
+// `SystemPageSize() / x`.
+//
+// The multiplier is expressed as a bitshift to optimize the code generation.
+// SystemPageSize() isn't always a constexpr, in which case the compiler
+// wouldn't know it's a power of two. The equivalence of these calculations is
+// checked in PartitionAllocGlobalInit().
+static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
+GetPartitionRefCountIndexMultiplierShift() {
+  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
+}
 PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
     uintptr_t slot_start) {
-  PA_DCHECK(slot_start == ::partition_alloc::internal::RemaskPtr(slot_start));
 #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
   CheckThatSlotOffsetIsZero(slot_start);
 #endif
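To see that the shift form equals the original division, here is a small standalone check with assumed example constants (4 KiB system pages, 2 MiB super pages, an 8-byte PartitionRefCount):

#include <cstddef>

// Assumed example values: 4 KiB system pages, 2 MiB super pages, and an
// 8-byte PartitionRefCount (kPartitionRefCountSizeShift == 3).
constexpr size_t kSystemPageShift = 12;
constexpr size_t kSuperPageShift = 21;
constexpr size_t kPartitionRefCountSizeShift = 3;

constexpr size_t kSystemPageSize = size_t{1} << kSystemPageShift;
constexpr size_t kSuperPageSize = size_t{1} << kSuperPageShift;
constexpr size_t kRefCountSize = size_t{1} << kPartitionRefCountSizeShift;

// Division form: the ref counts of all system pages share one metadata page.
constexpr size_t kMultiplier =
    kSystemPageSize / (kRefCountSize * (kSuperPageSize / kSystemPageSize));

// Shift form, as in GetPartitionRefCountIndexMultiplierShift().
constexpr size_t kShift =
    kSystemPageShift * 2 - kSuperPageShift - kPartitionRefCountSizeShift;

static_assert(kMultiplier == size_t{1} << kShift, "the two forms agree");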
@@ -340,17 +370,20 @@ PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
 #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
     PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
 #endif
-    // Have to remask because the previous pointer's tag is unpredictable. There
-    // could be a race condition though if the previous slot is freed/retagged
-    // concurrently, so ideally the ref count should occupy its own MTE granule.
+    // Have to MTE-tag, because the address is untagged, but lies within a slot
+    // area, which is protected by MTE.
+    //
+    // There could be a race condition though if the previous slot is
+    // freed/retagged concurrently, so ideally the ref count should occupy its
+    // own MTE granule.
     // TODO(richard.townsend@arm.com): improve this.
-    return ::partition_alloc::internal::RemaskPtr(
-        reinterpret_cast<PartitionRefCount*>(refcount_address));
+    return static_cast<PartitionRefCount*>(TagAddr(refcount_address));
   } else {
+    // No need to tag, as the metadata region isn't protected by MTE.
     PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
         (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
-    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift()) *
-                   kPartitionRefCountIndexMultiplier;
+    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
+                   << GetPartitionRefCountIndexMultiplierShift();
 #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
     PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
 #endif
@@ -375,7 +408,9 @@ PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
 #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
   CheckThatSlotOffsetIsZero(slot_start);
 #endif
-  return reinterpret_cast<PartitionRefCount*>(slot_start);
+  // Have to MTE-tag, because the address is untagged, but lies within a slot
+  // area, which is protected by MTE.
+  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
 }
 #endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
@@ -394,19 +429,4 @@ constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;
 }  // namespace partition_alloc::internal
-namespace base::internal {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-#if BUILDFLAG(USE_BACKUP_REF_PTR)
-using ::partition_alloc::internal::kPartitionPastAllocationAdjustment;
-using ::partition_alloc::internal::PartitionRefCount;
-using ::partition_alloc::internal::PartitionRefCountPointer;
-#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
-using ::partition_alloc::internal::kInSlotRefCountBufferSize;
-using ::partition_alloc::internal::kPartitionRefCountOffsetAdjustment;
-using ::partition_alloc::internal::kPartitionRefCountSizeAdjustment;
-}  // namespace base::internal
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
@@ -337,10 +337,8 @@ static size_t PartitionPurgeSlotSpan(
   // slots are not in use.
   for (PartitionFreelistEntry* entry = slot_span->get_freelist_head(); entry;
        /**/) {
-    size_t slot_index = (::partition_alloc::internal::UnmaskPtr(
-                             reinterpret_cast<uintptr_t>(entry)) -
-                         slot_span_start) /
-                        slot_size;
+    size_t slot_index =
+        (SlotStartPtr2Addr(entry) - slot_span_start) / slot_size;
     PA_DCHECK(slot_index < num_slots);
     slot_usage[slot_index] = 0;
 #if !BUILDFLAG(IS_WIN)
@@ -479,12 +477,11 @@ static void PartitionDumpSlotSpanStats(
   if (slot_span->CanStoreRawSize()) {
     stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
+    stats_out->active_count += 1;
   } else {
     stats_out->active_bytes +=
         (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
+    stats_out->active_count += slot_span->num_allocated_slots;
   }
-  stats_out->active_count += slot_span->num_allocated_slots;
   size_t slot_span_bytes_resident = RoundUpToSystemPage(
       (bucket_num_slots - slot_span->num_unprovisioned_slots) *
@@ -684,6 +681,10 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
     flags.brp_enabled_ =
         opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
+    flags.brp_zapping_enabled_ =
+        opts.backup_ref_ptr_zapping ==
+        PartitionOptions::BackupRefPtrZapping::kEnabled;
+    PA_CHECK(!flags.brp_zapping_enabled_ || flags.brp_enabled_);
 #else
     PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
 #endif
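A hypothetical configuration sketch of the invariant the new PA_CHECK enforces: zapping may only be enabled together with BackupRefPtr itself. The AlignedAlloc/ThreadCache enumerator names below are assumed to follow the same pattern as the fields shown elsewhere in this diff:

using partition_alloc::PartitionOptions;

constexpr PartitionOptions kZappingConfig{
    PartitionOptions::AlignedAlloc::kDisallowed,  // assumed enumerator
    PartitionOptions::ThreadCache::kDisabled,     // assumed enumerator
    PartitionOptions::Quarantine::kDisallowed,
    PartitionOptions::Cookie::kAllowed,
    PartitionOptions::BackupRefPtr::kEnabled,
    PartitionOptions::BackupRefPtrZapping::kEnabled,  // requires BRP enabled
    PartitionOptions::UseConfigurablePool::kNo,
};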
@@ -828,6 +829,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
     internal::SlotSpanMetadata<thread_safe>* slot_span,
     size_t requested_size) {
   PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  // Slot-span metadata isn't MTE-tagged.
   PA_DCHECK(
       internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
@@ -921,8 +923,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
 #if BUILDFLAG(PA_DCHECK_IS_ON)
   // Write a new trailing cookie.
   if (flags.allow_cookie) {
-    auto* object =
-        reinterpret_cast<unsigned char*>(SlotStartToObject(slot_start));
+    auto* object = static_cast<unsigned char*>(SlotStartToObject(slot_start));
     internal::PartitionCookieWriteValue(object +
                                         slot_span->GetUsableSize(this));
   }
@@ -933,17 +934,17 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
 template <bool thread_safe>
 bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
-    void* ptr,
+    void* object,
     SlotSpan* slot_span,
     size_t new_size) {
-  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  PA_DCHECK(internal::IsManagedByNormalBuckets(address));
+  uintptr_t slot_start = ObjectToSlotStart(object);
+  PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
   // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
   // new size is a significant percentage smaller. We could do the same if we
   // determine it is a win.
   if (AllocationCapacityFromRequestedSize(new_size) !=
-      AllocationCapacityFromPtr(ptr))
+      AllocationCapacityFromPtr(object))
     return false;
   // Trying to allocate |new_size| would use the same amount of underlying
@@ -951,7 +952,6 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
   // statistics (and cookie, if present).
   if (slot_span->CanStoreRawSize()) {
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
-    uintptr_t slot_start = ObjectToSlotStart(ptr);
     internal::PartitionRefCount* old_ref_count;
     if (brp_enabled()) {
       old_ref_count = internal::PartitionRefCountPointer(slot_start);
@@ -972,13 +972,12 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
     // Write a new trailing cookie only when it is possible to keep track of
     // raw size (otherwise we wouldn't know where to look for it later).
     if (flags.allow_cookie) {
-      internal::PartitionCookieWriteValue(
-          reinterpret_cast<unsigned char*>(address) +
-          slot_span->GetUsableSize(this));
+      internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
+                                          slot_span->GetUsableSize(this));
     }
 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
   }
-  return ptr;
+  return object;
 }
 template <bool thread_safe>
@@ -1176,6 +1175,12 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
         total_size_of_brp_quarantined_bytes.load(std::memory_order_relaxed);
     stats.total_brp_quarantined_count =
         total_count_of_brp_quarantined_slots.load(std::memory_order_relaxed);
+    stats.cumulative_brp_quarantined_bytes =
+        cumulative_size_of_brp_quarantined_bytes.load(
+            std::memory_order_relaxed);
+    stats.cumulative_brp_quarantined_count =
+        cumulative_count_of_brp_quarantined_slots.load(
+            std::memory_order_relaxed);
 #endif
     size_t direct_mapped_allocations_total_size = 0;
@@ -83,6 +83,14 @@
 namespace partition_alloc::internal {
+// This type trait verifies a type can be used as a pointer offset.
+//
+// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t)
+// values. Smaller types are also allowed.
+template <typename Z>
+static constexpr bool offset_type =
+    std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
 static constexpr size_t kAllocInfoSize = 1 << 20;
 struct AllocInfo {
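A self-contained illustration of what the offset_type trait above admits (the trait is mirrored locally so the asserts compile on their own):

#include <cstddef>
#include <type_traits>

// Local mirror of the trait, for illustration.
template <typename Z>
constexpr bool offset_type =
    std::is_integral_v<Z> && sizeof(Z) <= sizeof(std::ptrdiff_t);

static_assert(offset_type<int>);          // small signed integer: accepted
static_assert(offset_type<std::size_t>);  // unsigned, pointer-sized: accepted
static_assert(!offset_type<float>);       // not an integer: rejected
static_assert(!offset_type<int*>);        // pointer: rejected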
@@ -171,6 +179,11 @@ struct PartitionOptions {
     kEnabled,
   };
+  enum class BackupRefPtrZapping : uint8_t {
+    kDisabled,
+    kEnabled,
+  };
   enum class UseConfigurablePool : uint8_t {
     kNo,
     kIfAvailable,
@@ -182,12 +195,14 @@ struct PartitionOptions {
                              Quarantine quarantine,
                              Cookie cookie,
                              BackupRefPtr backup_ref_ptr,
+                             BackupRefPtrZapping backup_ref_ptr_zapping,
                              UseConfigurablePool use_configurable_pool)
       : aligned_alloc(aligned_alloc),
         thread_cache(thread_cache),
         quarantine(quarantine),
         cookie(cookie),
         backup_ref_ptr(backup_ref_ptr),
+        backup_ref_ptr_zapping(backup_ref_ptr_zapping),
         use_configurable_pool(use_configurable_pool) {}
   AlignedAlloc aligned_alloc;
@@ -195,6 +210,7 @@ struct PartitionOptions {
   Quarantine quarantine;
   Cookie cookie;
   BackupRefPtr backup_ref_ptr;
+  BackupRefPtrZapping backup_ref_ptr_zapping;
   UseConfigurablePool use_configurable_pool;
 };
@@ -241,6 +257,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
     bool allow_cookie;
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
     bool brp_enabled_;
+    bool brp_zapping_enabled_;
 #endif
     bool use_configurable_pool;
@@ -305,6 +322,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
   std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
   std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
+  std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
+  std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
 #endif
   // Slot span memory which has been provisioned, and is currently unused as
   // it's part of an empty SlotSpan. This is not clean memory, since it has
@@ -695,32 +714,21 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
     return size - flags.extras_size;
   }
-  PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
-    // TODO(bartekn): Move MTE tagging here.
+  PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
     // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-    return reinterpret_cast<void*>(slot_start + flags.extras_offset);
+    return slot_start + flags.extras_offset;
+  }
+  PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
+    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
+    return internal::TagAddr(SlotStartToObjectAddr(slot_start));
   }
   PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
-    // TODO(bartekn): Move MTE untagging here.
-    return reinterpret_cast<uintptr_t>(object) - flags.extras_offset;
+    return UntagPtr(object) - flags.extras_offset;
     // TODO(bartekn): Check that the result is indeed a slot start.
   }
-  static PA_ALWAYS_INLINE uintptr_t ObjectInnerPtr2Addr(void* object) {
-    // TODO(bartekn): Add MTE untagging here.
-    return reinterpret_cast<uintptr_t>(object);
-  }
-  static PA_ALWAYS_INLINE uintptr_t ObjectPtr2Addr(void* object) {
-    // TODO(bartekn): Check that |object| is indeed an object start.
-    return ObjectInnerPtr2Addr(object);
-  }
-  static PA_ALWAYS_INLINE void* SlotStartAddr2Ptr(uintptr_t slot_start) {
-    // TODO(bartekn): Move MTE tagging here.
-    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
-    return reinterpret_cast<void*>(slot_start);
-  }
   bool brp_enabled() const {
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
     return flags.brp_enabled_;
@@ -729,6 +737,14 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
 #endif
   }
+  bool brp_zapping_enabled() const {
+#if BUILDFLAG(USE_BACKUP_REF_PTR)
+    return flags.brp_zapping_enabled_;
+#else
+    return false;
+#endif
+  }
   PA_ALWAYS_INLINE bool uses_configurable_pool() const {
     return flags.use_configurable_pool;
   }
@@ -794,7 +810,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
     return ret;
   }
-  // Allocates memory, without initializing extras.
+  // Allocates a memory slot, without initializing extras.
   //
   // - |flags| are as in AllocWithFlags().
   // - |raw_size| accommodates for extras on top of AllocWithFlags()'s
@@ -815,7 +831,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
                                  bool* is_already_zeroed)
       PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  bool TryReallocInPlaceForNormalBuckets(void* ptr,
+  bool TryReallocInPlaceForNormalBuckets(void* object,
                                          SlotSpan* slot_span,
                                          size_t new_size);
   bool TryReallocInPlaceForDirectMap(
@@ -898,7 +914,8 @@ PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
       PartitionDirectMapMetadata<ThreadSafe>::FromSlotSpan(slot_span);
   size_t padding_for_alignment =
       metadata->direct_map_extent.padding_for_alignment;
-  PA_DCHECK(padding_for_alignment == (page - first_page) * PartitionPageSize());
+  PA_DCHECK(padding_for_alignment ==
+            static_cast<size_t>(page - first_page) * PartitionPageSize());
   PA_DCHECK(slot_start ==
             reservation_start + PartitionPageSize() + padding_for_alignment);
 #endif  // BUILDFLAG(PA_DCHECK_IS_ON)
@@ -914,8 +931,6 @@ PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
 // ref-count is in place for this allocation.
 PA_ALWAYS_INLINE uintptr_t
 PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
-  address = ::partition_alloc::internal::UnmaskPtr(address);
   // Adjust to support pointers right past the end of an allocation, which in
   // some cases appear to point outside the designated allocation slot.
   //
@@ -940,14 +955,11 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
   // Get the offset from the beginning of the slot span.
   uintptr_t slot_span_start =
       SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
-  PA_DCHECK(slot_span_start ==
-            ::partition_alloc::internal::UnmaskPtr(slot_span_start));
   size_t offset_in_slot_span = address - slot_span_start;
   auto* bucket = slot_span->bucket;
-  return ::partition_alloc::internal::RemaskPtr(
-      slot_span_start +
-      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span));
+  return slot_span_start +
+         bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
 }
 // Checks whether a given address stays within the same allocation slot after
@@ -955,8 +967,9 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
 //
 // This isn't a general purpose function. The caller is responsible for ensuring
 // that the ref-count is in place for this allocation.
+template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
 PA_ALWAYS_INLINE bool PartitionAllocIsValidPtrDelta(uintptr_t address,
-                                                    ptrdiff_t delta_in_bytes) {
+                                                    Z delta_in_bytes) {
   // Required for pointers right past an allocation. See
   // |PartitionAllocGetSlotStartInBRPPool()|.
   uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
@@ -974,11 +987,10 @@ PA_ALWAYS_INLINE bool PartitionAllocIsValidPtrDelta(uintptr_t address,
   // Double check that ref-count is indeed present.
   PA_DCHECK(root->brp_enabled());
-  void* object = root->SlotStartToObject(slot_start);
-  uintptr_t object_addr = PartitionRoot<ThreadSafe>::ObjectPtr2Addr(object);
-  uintptr_t new_address = address + delta_in_bytes;
+  uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
+  uintptr_t new_address = address + static_cast<uintptr_t>(delta_in_bytes);
   return object_addr <= new_address &&
-         // We use "greater then or equal" below because we want to include
+         // We use "greater than or equal" below because we want to include
          // pointers right past the end of an allocation.
          new_address <= object_addr + slot_span->GetUsableSize(root);
 }
@@ -994,7 +1006,7 @@ PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
   // memset() can be really expensive.
 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  DebugMemset(reinterpret_cast<void*>(slot_start), kFreedByte,
+  DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
               slot_span->GetUtilizedSlotSize()
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
                   - sizeof(PartitionRefCount)
@@ -1021,9 +1033,8 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
                                             size_t slot_span_alignment,
                                             size_t* usable_size,
                                             bool* is_already_zeroed) {
-  PA_DCHECK(
-      (slot_span_alignment >= internal::PartitionPageSize()) &&
-      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
+  PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
+            internal::base::bits::IsPowerOfTwo(slot_span_alignment));
   SlotSpan* slot_span = bucket->active_slot_spans_head;
   // There always must be a slot span on the active list (could be a sentinel).
   PA_DCHECK(slot_span);
@@ -1032,7 +1043,7 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
   PA_DCHECK(!slot_span->marked_full);
   uintptr_t slot_start =
-      reinterpret_cast<uintptr_t>(slot_span->get_freelist_head());
+      internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
   // Use the fast path when a slot is readily available on the free list of the
   // first active slot span. However, fall back to the slow path if a
   // higher-order alignment is requested, because an inner slot of an existing
@@ -1054,7 +1065,7 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
     PA_DCHECK(!slot_span->CanStoreRawSize());
     PA_DCHECK(!slot_span->bucket->is_direct_mapped());
     void* entry = slot_span->PopForAlloc(bucket->slot_size);
-    PA_DCHECK(reinterpret_cast<uintptr_t>(entry) == slot_start);
+    PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);
     PA_DCHECK(slot_span->bucket == bucket);
   } else {
@@ -1118,7 +1129,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
   // only cases where we don't would be delayed free() in PCScan, but |*object|
   // can be cold in cache.
   PA_PREFETCH(object);
-  uintptr_t object_addr = ObjectPtr2Addr(object);
+  uintptr_t object_addr = internal::ObjectPtr2Addr(object);
   // On Android, malloc() interception is more fragile than on other
   // platforms, as we use wrapped symbols. However, the GigaCage allows us to
@@ -1154,12 +1165,10 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
 #if defined(PA_HAS_MEMORY_TAGGING)
   const size_t slot_size = slot_span->bucket->slot_size;
   if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
-    // TODO(bartekn): |slot_start| shouldn't have MTE tag.
-    slot_start = ::partition_alloc::internal::TagMemoryRangeIncrement(
-        slot_start, slot_size);
+    internal::TagMemoryRangeIncrement(slot_start, slot_size);
     // Incrementing the MTE-tag in the memory range invalidates the |object|'s
     // tag, so it must be retagged.
-    object = ::partition_alloc::internal::RemaskPtr(object);
+    object = internal::TagPtr(object);
   }
 #else
   // We are going to read from |*slot_span| in all branches, but haven't done it
@@ -1243,8 +1252,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   if (flags.allow_cookie) {
     // Verify the cookie after the allocated region.
     // If this assert fires, you probably corrupted memory.
-    internal::PartitionCookieCheckValue(
-        reinterpret_cast<unsigned char*>(object) +
-        slot_span->GetUsableSize(this));
+    internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
+                                        slot_span->GetUsableSize(this));
   }
 #endif
@@ -1253,11 +1261,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   // default.
   if (PA_UNLIKELY(IsQuarantineEnabled())) {
     if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
-      uintptr_t unmasked_slot_start =
-          ::partition_alloc::internal::UnmaskPtr(slot_start);
       // Mark the state in the state bitmap as freed.
-      internal::StateBitmapFromAddr(unmasked_slot_start)
-          ->Free(unmasked_slot_start);
+      internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
     }
   }
@@ -1269,7 +1274,8 @@
     // If there are no more references to the allocation, it can be freed
     // immediately. Otherwise, defer the operation and zap the memory to turn
    // potential use-after-free issues into unexploitable crashes.
-    if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs()))
+    if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
+                    brp_zapping_enabled()))
       internal::SecureMemset(object, internal::kQuarantinedByte,
                              slot_span->GetUsableSize(this));
@@ -1278,6 +1284,10 @@
           slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
       total_count_of_brp_quarantined_slots.fetch_add(1,
                                                      std::memory_order_relaxed);
+      cumulative_size_of_brp_quarantined_bytes.fetch_add(
+          slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
+      cumulative_count_of_brp_quarantined_slots.fetch_add(
+          1, std::memory_order_relaxed);
       return;
     }
   }
@@ -1285,7 +1295,8 @@
   // memset() can be really expensive.
 #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
-  internal::DebugMemset(SlotStartAddr2Ptr(slot_start), internal::kFreedByte,
+  internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
+                        internal::kFreedByte,
                         slot_span->GetUtilizedSlotSize()
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
                             - sizeof(internal::PartitionRefCount)
@@ -1296,7 +1307,7 @@
   // efficiency.
   if (PA_UNLIKELY(internal::RandomPeriod()) &&
       !IsDirectMappedBucket(slot_span->bucket)) {
-    internal::SecureMemset(SlotStartAddr2Ptr(slot_start), 0,
+    internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
                            slot_span->GetUtilizedSlotSize()
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
                                - sizeof(internal::PartitionRefCount)
@@ -1350,7 +1361,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(uintptr_t slot_start,
   // RawFreeLocked()). This is intentional, as the thread cache is purged often,
   // and as a consequence the memory has already been touched recently (to link
   // the thread cache freelist).
-  *reinterpret_cast<volatile uintptr_t*>(slot_start) = 0;
+  *static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
+      0;
   // Note: even though we write to slot_start + sizeof(void*) as well, due to
   // alignment constraints, the two locations are always going to be in the same
   // OS page. No need to write to the second one as well.
@@ -1418,7 +1430,6 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeLocked(
 template <bool thread_safe>
 PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
     SlotSpan* slot_span) {
-  slot_span = ::partition_alloc::internal::UnmaskPtr(slot_span);
   PartitionRoot* root = FromSlotSpan(slot_span);
   return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
 }
@@ -1850,11 +1861,8 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
   // default.
   if (PA_UNLIKELY(is_quarantine_enabled)) {
     if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
-      uintptr_t unmasked_slot_start =
-          ::partition_alloc::internal::UnmaskPtr(slot_start);
       // Mark the corresponding bits in the state bitmap as allocated.
-      internal::StateBitmapFromAddr(unmasked_slot_start)
-          ->Allocate(unmasked_slot_start);
+      internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
     }
   }
@@ -1957,8 +1965,7 @@ PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
   // |alignment| is a power of two, but the compiler doesn't necessarily know
   // that. A regular % operation is very slow, make sure to use the equivalent,
   // faster form.
-  // No need to call ObjectPtr2Addr, because MTE untagging isn't necessary, as
-  // it doesn't change alignment.
+  // No need to MTE-untag, as it doesn't change alignment.
   PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
   return object;
@@ -2021,30 +2028,12 @@ using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
 static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
                   internal::kPartitionCachelineSize,
               "Padding is incorrect");
-}  // namespace partition_alloc
-namespace base {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::PartitionOptions;
-using ::partition_alloc::PurgeFlags;
-using ::partition_alloc::ThreadSafePartitionRoot;
-namespace internal {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::internal::ScopedSyscallTimer;
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
-using ::partition_alloc::internal::PartitionAllocFreeForRefCounting;
+// Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
 using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
-using ::partition_alloc::internal::PartitionAllocIsValidPtrDelta;
 #endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
@@ -59,6 +59,10 @@ struct PartitionMemoryStats {
       total_brp_quarantined_bytes;  // Total bytes that are quarantined by BRP.
   size_t total_brp_quarantined_count;  // Total number of slots that are
                                        // quarantined by BRP.
+  size_t cumulative_brp_quarantined_bytes;  // Cumulative bytes that are
+                                            // quarantined by BRP.
+  size_t cumulative_brp_quarantined_count;  // Cumulative number of slots that
+                                            // are quarantined by BRP.
 #endif
   bool has_thread_cache;
@@ -129,16 +133,4 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) SimplePartitionStatsDumper
 }  // namespace partition_alloc
-namespace base {
-// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
-// the migration to the new namespaces gets done.
-using ::partition_alloc::PartitionBucketMemoryStats;
-using ::partition_alloc::PartitionMemoryStats;
-using ::partition_alloc::PartitionStatsDumper;
-using ::partition_alloc::SimplePartitionStatsDumper;
-using ::partition_alloc::ThreadCacheStats;
-}  // namespace base
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_STATS_H_
@@ -48,12 +48,15 @@ PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
   size_t offset_in_bitmap = offset_in_super_page >>
                                 internal::tag_bitmap::kBytesPerPartitionTagShift
                             << internal::tag_bitmap::kPartitionTagSizeShift;
+  // No need to tag, as the tag bitmap region isn't protected by MTE.
   return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
 }
 PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
-  return PartitionTagPointer(
-      internal::UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
+  // Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
+  // from the pointer. Whereas, PartitionTagPointer relates to software MTE
+  // (i.e. MTECheckedPtr) and it returns a pointer to the tag in memory.
+  return PartitionTagPointer(UntagPtr(ptr));
 }
 namespace internal {
@@ -75,7 +78,10 @@ PA_ALWAYS_INLINE void PartitionTagSetValue(uintptr_t addr,
 PA_ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
                                            size_t size,
                                            PartitionTag value) {
-  PartitionTagSetValue(reinterpret_cast<uintptr_t>(ptr), size, value);
+  // Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
+  // from the pointer. Whereas, PartitionTagSetValue relates to software MTE
+  // (i.e. MTECheckedPtr) and it sets the in-memory tag.
+  PartitionTagSetValue(UntagPtr(ptr), size, value);
 }
 PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
@@ -34,7 +34,7 @@ using PartitionTlsKey = pthread_key_t;
 #if BUILDFLAG(IS_MAC) && defined(ARCH_CPU_X86_64)
 namespace {
-PA_ALWAYS_INLINE void* FastTlsGet(intptr_t index) {
+PA_ALWAYS_INLINE void* FastTlsGet(PartitionTlsKey index) {
   // On macOS, pthread_getspecific() is in libSystem, so a call to it has to go
   // through PLT. However, and contrary to some other platforms, *all* TLS keys
   // are in a static array in the thread structure. So they are *always* at a
@ -53,7 +53,10 @@ PA_ALWAYS_INLINE void* FastTlsGet(intptr_t index) {
// This function is essentially inlining the content of pthread_getspecific() // This function is essentially inlining the content of pthread_getspecific()
// here. // here.
intptr_t result; intptr_t result;
asm("movq %%gs:(,%1,8), %0;" : "=r"(result) : "r"(index)); static_assert(sizeof index <= sizeof(intptr_t));
asm("movq %%gs:(,%1,8), %0;"
: "=r"(result)
: "r"(static_cast<intptr_t>(index)));
return reinterpret_cast<void*>(result); return reinterpret_cast<void*>(result);
} }
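Stripped of the constraints, the asm above is a single indexed load off the %gs segment base. A rough C-level equivalent, purely for illustration (the thread structure layout is a libpthread implementation detail, not a public contract):

#include <cstdint>

// What the movq computes, expressed over an explicit slot array. On x86-64
// macOS, %gs points at the current thread's TLS slot array, so slot |index|
// lives 8 * index bytes past that base.
void* FastTlsGetEquivalent(void* const* tls_slots, intptr_t index) {
  return tls_slots[index];  // movq %gs:(,%index,8), %result
}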
@ -140,18 +143,4 @@ PA_ALWAYS_INLINE void PartitionTlsSet(PartitionTlsKey key, void* value) {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::PartitionTlsCreate;
using ::partition_alloc::internal::PartitionTlsGet;
using ::partition_alloc::internal::PartitionTlsKey;
using ::partition_alloc::internal::PartitionTlsSet;
#if BUILDFLAG(IS_WIN)
using ::partition_alloc::internal::PartitionTlsSetOnDllProcessDetach;
#endif // BUILDFLAG(IS_WIN)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_

View File

@ -146,7 +146,6 @@ PA_ALWAYS_INLINE const uint16_t* GetReservationOffsetTableEnd(
PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) { PA_ALWAYS_INLINE uint16_t* ReservationOffsetPointer(uintptr_t address) {
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, find the owning Pool and compute the offset from its base. // In 64-bit mode, find the owning Pool and compute the offset from its base.
address = ::partition_alloc::internal::UnmaskPtr(address);
auto [pool, offset] = GetPoolAndOffset(address); auto [pool, offset] = GetPoolAndOffset(address);
return ReservationOffsetPointer(pool, offset); return ReservationOffsetPointer(pool, offset);
#else #else
@ -254,21 +253,4 @@ PA_ALWAYS_INLINE bool IsManagedByNormalBucketsOrDirectMap(uintptr_t address) {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::GetDirectMapReservationStart;
using ::partition_alloc::internal::GetReservationOffsetTable;
using ::partition_alloc::internal::GetReservationOffsetTableEnd;
using ::partition_alloc::internal::IsManagedByDirectMap;
using ::partition_alloc::internal::IsManagedByNormalBuckets;
using ::partition_alloc::internal::IsManagedByNormalBucketsOrDirectMap;
using ::partition_alloc::internal::IsReservationStart;
using ::partition_alloc::internal::kOffsetTagNormalBuckets;
using ::partition_alloc::internal::kOffsetTagNotAllocated;
using ::partition_alloc::internal::ReservationOffsetPointer;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_RESERVATION_OFFSET_TABLE_H_

View File

@ -17,6 +17,7 @@ constexpr PartitionOptions kConfig{
PartitionOptions::Quarantine::kDisallowed, PartitionOptions::Quarantine::kDisallowed,
PartitionOptions::Cookie::kAllowed, PartitionOptions::Cookie::kAllowed,
PartitionOptions::BackupRefPtr::kDisabled, PartitionOptions::BackupRefPtr::kDisabled,
PartitionOptions::BackupRefPtrZapping::kDisabled,
PartitionOptions::UseConfigurablePool::kNo, PartitionOptions::UseConfigurablePool::kNo,
}; };
} // namespace } // namespace

View File

@ -80,16 +80,4 @@ struct PCScanMetadataDeleter final {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::AllocatedOnPCScanMetadataPartition;
using ::partition_alloc::internal::MakePCScanMetadata;
using ::partition_alloc::internal::MetadataAllocator;
using ::partition_alloc::internal::PCScanMetadataAllocator;
using ::partition_alloc::internal::PCScanMetadataDeleter;
using ::partition_alloc::internal::ReinitPCScanMetadataAllocatorForTesting;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_

View File

@ -247,15 +247,12 @@ PA_ALWAYS_INLINE void PCScan::MoveToQuarantine(void* object,
SecureMemset(object, 0, usable_size); SecureMemset(object, 0, usable_size);
} }
// TODO(bartekn): Remove MTE untagging, once it's done in the caller. auto* state_bitmap = StateBitmapFromAddr(slot_start);
uintptr_t unmasked_slot_start =
::partition_alloc::internal::UnmaskPtr(slot_start);
auto* state_bitmap = StateBitmapFromAddr(unmasked_slot_start);
// Mark the state in the state bitmap as quarantined. Make sure to do it after // Mark the state in the state bitmap as quarantined. Make sure to do it after
// the clearing to avoid racing with *Scan Sweeper. // the clearing to avoid racing with *Scan Sweeper.
[[maybe_unused]] const bool succeeded = [[maybe_unused]] const bool succeeded =
state_bitmap->Quarantine(unmasked_slot_start, instance.epoch()); state_bitmap->Quarantine(slot_start, instance.epoch());
#if PA_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED #if PA_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED
if (PA_UNLIKELY(!succeeded)) if (PA_UNLIKELY(!succeeded))
DoubleFreeAttempt(); DoubleFreeAttempt();
@ -281,10 +278,4 @@ inline PCScanScheduler& PCScan::scheduler() {
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::PCScan;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_

View File

@ -142,7 +142,6 @@ class QuarantineCardTable final {
// slots. May return false positives, but should never return false // slots. May return false positives, but should never return false
// negatives, as otherwise this breaks security. // negatives, as otherwise this breaks security.
PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const { PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
address = ::partition_alloc::internal::UnmaskPtr(address);
const size_t byte = Byte(address); const size_t byte = Byte(address);
PA_SCAN_DCHECK(byte < bytes_.size()); PA_SCAN_DCHECK(byte < bytes_.size());
return bytes_[byte]; return bytes_[byte];
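The false-positive behavior follows from the card granularity: quarantining any slot marks the whole card containing it. A toy card table, with an assumed 4 KiB card size (the real geometry is defined elsewhere in this file), shows the effect:

#include <array>
#include <cstddef>
#include <cstdint>

// Toy model: one byte per 4 KiB card (card size assumed for illustration).
class ToyCardTable {
 public:
  void Quarantine(uintptr_t address) { bytes_[Card(address)] = 1; }
  bool IsQuarantined(uintptr_t address) const { return bytes_[Card(address)]; }

 private:
  static constexpr unsigned kCardShift = 12;  // 4 KiB cards (assumed)
  static constexpr size_t kNumCards = 1024;
  static size_t Card(uintptr_t address) {
    return (address >> kCardShift) % kNumCards;
  }
  std::array<uint8_t, kNumCards> bytes_{};
};
// Quarantine(0x5008) also makes IsQuarantined(0x5ff0) true (same card): a
// false positive, but never a false negative for a quarantined slot.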
@ -653,6 +652,7 @@ PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
PA_SCAN_INLINE size_t PA_SCAN_INLINE size_t
PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const { PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
// Check if |maybe_ptr| points somewhere into the heap. // Check if |maybe_ptr| points somewhere into the heap.
// The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr); auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
if (!state_map) if (!state_map)
return 0; return 0;
@ -722,8 +722,7 @@ void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
// ScanPartitions. // ScanPartitions.
const size_t size = slot_span->GetUsableSize(root); const size_t size = slot_span->GetUsableSize(root);
if (clear_type == PCScan::ClearType::kLazy) { if (clear_type == PCScan::ClearType::kLazy) {
void* object = ::partition_alloc::internal::RemaskPtr( void* object = root->SlotStartToObject(slot_start);
root->SlotStartToObject(slot_start));
memset(object, 0, size); memset(object, 0, size);
} }
#if PA_STARSCAN_USE_CARD_TABLE #if PA_STARSCAN_USE_CARD_TABLE
@ -774,9 +773,10 @@ class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
} }
#endif // defined(PA_HAS_64_BITS_POINTERS) #endif // defined(PA_HAS_64_BITS_POINTERS)
PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr) { PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
quarantine_size_ += task_.TryMarkSlotInNormalBuckets( // |maybe_ptr_maybe_tagged| may have an MTE tag, so remove it first.
::partition_alloc::internal::UnmaskPtr(maybe_ptr)); quarantine_size_ +=
task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
} }
const PCScanTask& task_; const PCScanTask& task_;
@ -937,8 +937,6 @@ void UnmarkInCardTable(uintptr_t slot_start,
SlotSpanMetadata<ThreadSafe>* slot_span, SlotSpanMetadata<ThreadSafe>* slot_span,
uintptr_t slot_start) { uintptr_t slot_start) {
void* object = root->SlotStartToObject(slot_start); void* object = root->SlotStartToObject(slot_start);
// TODO(bartekn): Move MTE masking into SlotStartToObject.
object = ::partition_alloc::internal::RemaskPtr(object);
root->FreeNoHooksImmediate(object, slot_span, slot_start); root->FreeNoHooksImmediate(object, slot_span, slot_start);
UnmarkInCardTable(slot_start, slot_span); UnmarkInCardTable(slot_start, slot_span);
return slot_span->bucket->slot_size; return slot_span->bucket->slot_size;
@ -1003,8 +1001,7 @@ void UnmarkInCardTable(uintptr_t slot_start,
const auto bitmap_iterator = [&](uintptr_t slot_start) { const auto bitmap_iterator = [&](uintptr_t slot_start) {
SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start); SlotSpan* current_slot_span = SlotSpan::FromSlotStart(slot_start);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull( auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
::partition_alloc::internal::RemaskPtr(slot_start));
if (current_slot_span != previous_slot_span) { if (current_slot_span != previous_slot_span) {
// We started scanning a new slot span. Flush the accumulated freelist to // We started scanning a new slot span. Flush the accumulated freelist to

View File

@ -20,11 +20,8 @@
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h" #include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
#include "base/allocator/partition_allocator/starscan/write_protector.h" #include "base/allocator/partition_allocator/starscan/write_protector.h"
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace partition_alloc::internal { namespace partition_alloc::internal {
class StarScanSnapshot;
class PCScanTask; class PCScanTask;
// Internal PCScan singleton. The separation between frontend and backend is // Internal PCScan singleton. The separation between frontend and backend is
@ -111,7 +108,7 @@ class PCScanInternal final {
private: private:
friend internal::base::NoDestructor<PCScanInternal>; friend internal::base::NoDestructor<PCScanInternal>;
friend class partition_alloc::internal::StarScanSnapshot; friend class StarScanSnapshot;
using StackTops = std::unordered_map< using StackTops = std::unordered_map<
internal::base::PlatformThreadId, internal::base::PlatformThreadId,
@ -149,11 +146,4 @@ class PCScanInternal final {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::PCScanInternal;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_

View File

@ -33,8 +33,7 @@ void PCScanSchedulingBackend::EnableScheduling() {
scheduling_enabled_.store(true, std::memory_order_relaxed); scheduling_enabled_.store(true, std::memory_order_relaxed);
// Check if *Scan needs to be run immediately. // Check if *Scan needs to be run immediately.
if (NeedsToImmediatelyScan()) if (NeedsToImmediatelyScan())
::base::internal::PCScan::PerformScan( PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
::base::internal::PCScan::InvocationMode::kNonBlocking);
} }
size_t PCScanSchedulingBackend::ScanStarted() { size_t PCScanSchedulingBackend::ScanStarted() {

View File

@ -198,14 +198,4 @@ bool PCScanScheduler::AccountFreed(size_t size) {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::LimitBackend;
using ::partition_alloc::internal::MUAwareTaskBasedBackend;
using ::partition_alloc::internal::PCScanScheduler;
using ::partition_alloc::internal::QuarantineData;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_

View File

@ -139,9 +139,4 @@ void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::RacefulWorklist;
}
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_

View File

@ -12,6 +12,7 @@
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h" #include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h" #include "build/build_config.h"
#if defined(ARCH_CPU_X86_64) #if defined(ARCH_CPU_X86_64)
@ -92,7 +93,9 @@ void ScanLoop<Derived>::RunUnvectorized(uintptr_t begin, uintptr_t end) {
PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(begin % sizeof(uintptr_t)));
PA_SCAN_DCHECK(!(end % sizeof(uintptr_t))); PA_SCAN_DCHECK(!(end % sizeof(uintptr_t)));
#if defined(PA_HAS_64_BITS_POINTERS) #if defined(PA_HAS_64_BITS_POINTERS)
const uintptr_t mask = Derived::CageMask(); // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |mask| to untag, for efficiency.
const uintptr_t mask = Derived::CageMask() & kPtrUntagMask;
const uintptr_t base = Derived::CageBase(); const uintptr_t base = Derived::CageBase();
#endif #endif
for (; begin < end; begin += sizeof(uintptr_t)) { for (; begin < end; begin += sizeof(uintptr_t)) {
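The piggybacking works because cage membership is decided by high address bits while the MTE tag occupies the top byte, so a single AND can clear the tag and isolate the cage bits at once. A compile-time check with hypothetical cage constants (the real ones come from CageBase()/CageMask()):

#include <cstdint>

// Hypothetical cage geometry, for illustration only.
constexpr uint64_t kCageBase = 0x0000400000000000ull;
constexpr uint64_t kCageMask = 0xffffff0000000000ull;
constexpr uint64_t kPtrUntagMask = 0x00ffffffffffffffull;
constexpr uint64_t kMask = kCageMask & kPtrUntagMask;

// A value inside the cage still matches after the single AND, even with an
// MTE tag (0x0b) in its top byte...
static_assert((0x0b00401234567890ull & kMask) == kCageBase, "in-cage, tagged");
// ...while a value outside the cage is rejected.
static_assert((0x0b00801234567890ull & kMask) != kCageBase, "out of cage");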
@ -126,7 +129,10 @@ __attribute__((target("avx2"))) void ScanLoop<Derived>::RunAVX2(uintptr_t begin,
// vmovdqa (_mm256_load_si256) is twice smaller (0.25) than that of vmovapd // vmovdqa (_mm256_load_si256) is twice smaller (0.25) than that of vmovapd
// (_mm256_load_pd). // (_mm256_load_pd).
const __m256i vbase = _mm256_set1_epi64x(derived().CageBase()); const __m256i vbase = _mm256_set1_epi64x(derived().CageBase());
const __m256i cage_mask = _mm256_set1_epi64x(derived().CageMask()); // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m256i cage_mask =
_mm256_set1_epi64x(derived().CageMask() & kPtrUntagMask);
static_assert(sizeof(__m256i) == kBytesInVector); static_assert(sizeof(__m256i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) { for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
@ -162,7 +168,10 @@ __attribute__((target("sse4.1"))) void ScanLoop<Derived>::RunSSE4(
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t); static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement)); PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const __m128i vbase = _mm_set1_epi64x(derived().CageBase()); const __m128i vbase = _mm_set1_epi64x(derived().CageBase());
const __m128i cage_mask = _mm_set1_epi64x(derived().CageMask()); // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const __m128i cage_mask =
_mm_set1_epi64x(derived().CageMask() & kPtrUntagMask);
static_assert(sizeof(__m128i) == kBytesInVector); static_assert(sizeof(__m128i) == kBytesInVector);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) { for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
@ -200,7 +209,10 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t); static constexpr size_t kBytesInVector = kWordsInVector * sizeof(uintptr_t);
PA_SCAN_DCHECK(!(begin % kAlignmentRequirement)); PA_SCAN_DCHECK(!(begin % kAlignmentRequirement));
const uint64x2_t vbase = vdupq_n_u64(derived().CageBase()); const uint64x2_t vbase = vdupq_n_u64(derived().CageBase());
const uint64x2_t cage_mask = vdupq_n_u64(derived().CageMask()); // If the read value is a pointer into the PA region, it's likely
// MTE-tagged. Piggyback on |cage_mask| to untag, for efficiency.
const uint64x2_t cage_mask =
vdupq_n_u64(derived().CageMask() & kPtrUntagMask);
for (; begin <= (end - kBytesInVector); begin += kBytesInVector) { for (; begin <= (end - kBytesInVector); begin += kBytesInVector) {
// Keep it MTE-untagged. See DisableMTEScope for details. // Keep it MTE-untagged. See DisableMTEScope for details.
@ -224,11 +236,4 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::ScanLoop;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_

View File

@ -91,11 +91,4 @@ StarScanSnapshot::UnprotectingView::UnprotectingView(StarScanSnapshot& snapshot)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::StarScanSnapshot;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_

View File

@ -45,14 +45,4 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) Stack final {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::GetStackPointer;
using ::partition_alloc::internal::GetStackTop;
using ::partition_alloc::internal::Stack;
using ::partition_alloc::internal::StackVisitor;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_

View File

@ -27,12 +27,4 @@ enum class SimdSupport : uint8_t {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::Context;
using ::partition_alloc::internal::SimdSupport;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_

View File

@ -245,11 +245,4 @@ inline StatsCollector::MetadataString StatsCollector::ToUMAString(
} // namespace internal } // namespace internal
} // namespace partition_alloc } // namespace partition_alloc
// TODO(crbug.com/1151236): Remove this when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::StatsCollector;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_

View File

@ -5,24 +5,22 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/starscan/stats_collector.h" #include "base/allocator/partition_allocator/starscan/stats_collector.h"
namespace partition_alloc { namespace partition_alloc {
static_assert(sizeof(uint32_t) >= sizeof(internal::base::PlatformThreadId),
"sizeof(tid) must be larger than sizeof(PlatformThreadId)");
// StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, // StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1,
// and UmaHistogramTimes. It is used just to remove trace_log and uma // and UmaHistogramTimes. It is used just to remove trace_log and uma
// dependencies from partition allocator. // dependencies from partition allocator.
class StatsReporter { class StatsReporter {
public: public:
virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id, virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
uint32_t tid, internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value, int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) {} int64_t end_time_ticks_internal_value) {}
virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id, virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
uint32_t tid, internal::base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value, int64_t start_time_ticks_internal_value,
int64_t end_time_ticks_internal_value) {} int64_t end_time_ticks_internal_value) {}

View File

@ -27,9 +27,8 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
::base::internal::PCScan::ClearType NoWriteProtector::SupportedClearType() PCScan::ClearType NoWriteProtector::SupportedClearType() const {
const { return PCScan::ClearType::kLazy;
return ::base::internal::PCScan::ClearType::kLazy;
} }
#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED) #if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
@ -58,7 +57,7 @@ void UserFaultFDThread(int uffd) {
// Enter the safepoint. Concurrent faulted writes will wait until safepoint // Enter the safepoint. Concurrent faulted writes will wait until safepoint
// finishes. // finishes.
::base::internal::PCScan::JoinScanIfNeeded(); PCScan::JoinScanIfNeeded();
} }
} }
} // namespace } // namespace
@ -121,10 +120,8 @@ void UserFaultFDWriteProtector::UnprotectPages(uintptr_t begin, size_t length) {
UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect); UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect);
} }
::base::internal::PCScan::ClearType PCScan::ClearType UserFaultFDWriteProtector::SupportedClearType() const {
UserFaultFDWriteProtector::SupportedClearType() const { return IsSupported() ? PCScan::ClearType::kEager : PCScan::ClearType::kLazy;
return IsSupported() ? ::base::internal::PCScan::ClearType::kEager
: ::base::internal::PCScan::ClearType::kLazy;
} }
bool UserFaultFDWriteProtector::IsSupported() const { bool UserFaultFDWriteProtector::IsSupported() const {

View File

@ -28,14 +28,14 @@ class WriteProtector : public AllocatedOnPCScanMetadataPartition {
virtual bool IsEnabled() const = 0; virtual bool IsEnabled() const = 0;
virtual ::base::internal::PCScan::ClearType SupportedClearType() const = 0; virtual PCScan::ClearType SupportedClearType() const = 0;
}; };
class NoWriteProtector final : public WriteProtector { class NoWriteProtector final : public WriteProtector {
public: public:
void ProtectPages(uintptr_t, size_t) final {} void ProtectPages(uintptr_t, size_t) final {}
void UnprotectPages(uintptr_t, size_t) final {} void UnprotectPages(uintptr_t, size_t) final {}
::base::internal::PCScan::ClearType SupportedClearType() const final; PCScan::ClearType SupportedClearType() const final;
inline bool IsEnabled() const override; inline bool IsEnabled() const override;
}; };
@ -55,7 +55,7 @@ class UserFaultFDWriteProtector final : public WriteProtector {
void ProtectPages(uintptr_t, size_t) final; void ProtectPages(uintptr_t, size_t) final;
void UnprotectPages(uintptr_t, size_t) final; void UnprotectPages(uintptr_t, size_t) final;
::base::internal::PCScan::ClearType SupportedClearType() const final; PCScan::ClearType SupportedClearType() const final;
inline bool IsEnabled() const override; inline bool IsEnabled() const override;
@ -73,15 +73,4 @@ bool UserFaultFDWriteProtector::IsEnabled() const {
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::NoWriteProtector;
using ::partition_alloc::internal::WriteProtector;
#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
using ::partition_alloc::internal::UserFaultFDWriteProtector;
#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_

View File

@ -39,10 +39,11 @@ namespace internal {
constexpr int kMemTagGranuleSize = 16u; constexpr int kMemTagGranuleSize = 16u;
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
constexpr uint64_t kMemTagUnmask = 0x00ffffffffffffffuLL; constexpr uint64_t kPtrTagMask = 0xff00000000000000uLL;
#else #else
constexpr uint64_t kMemTagUnmask = 0xffffffffffffffffuLL; constexpr uint64_t kPtrTagMask = 0;
#endif // defined(PA_HAS_MEMORY_TAGGING) #endif // defined(PA_HAS_MEMORY_TAGGING)
constexpr uint64_t kPtrUntagMask = ~kPtrTagMask;
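A quick sanity check of the mask pair: with the tag in the top byte, untagging is a single AND with kPtrUntagMask, and it leaves the low 56 bits untouched. Using the MTE value of the mask as a literal:

static_assert((0x0f00123456789abcull & ~0xff00000000000000ull) ==
                  0x0000123456789abcull,
              "clearing kPtrTagMask strips only the top-byte tag");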
#if BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_ANDROID)
// Changes the memory tagging mode for all threads in the current process. // Changes the memory tagging mode for all threads in the current process.
@ -86,6 +87,7 @@ extern PA_COMPONENT_EXPORT(PARTITION_ALLOC)
// (e.g. free). Returns the pointer with the new tag. Ensures that the entire // (e.g. free). Returns the pointer with the new tag. Ensures that the entire
// range is set to the same tag. // range is set to the same tag.
// TODO(bartekn): Remove the T* variant. // TODO(bartekn): Remove the T* variant.
// TODO(bartekn): Consider removing the return value.
template <typename T> template <typename T>
PA_ALWAYS_INLINE T* TagMemoryRangeIncrement(T* ptr, size_t size) { PA_ALWAYS_INLINE T* TagMemoryRangeIncrement(T* ptr, size_t size) {
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
@ -94,9 +96,8 @@ PA_ALWAYS_INLINE T* TagMemoryRangeIncrement(T* ptr, size_t size) {
return ptr; return ptr;
#endif #endif
} }
PA_ALWAYS_INLINE uintptr_t TagMemoryRangeIncrement(uintptr_t ptr, size_t size) { PA_ALWAYS_INLINE void* TagMemoryRangeIncrement(uintptr_t ptr, size_t size) {
return reinterpret_cast<uintptr_t>( return TagMemoryRangeIncrement(reinterpret_cast<void*>(ptr), size);
TagMemoryRangeIncrement(reinterpret_cast<void*>(ptr), size));
} }
// Randomly changes the tag of the ptr memory range. Useful for initial random // Randomly changes the tag of the ptr memory range. Useful for initial random
@ -114,46 +115,45 @@ PA_ALWAYS_INLINE T* TagMemoryRangeRandomly(T* ptr,
return ptr; return ptr;
#endif #endif
} }
PA_ALWAYS_INLINE uintptr_t TagMemoryRangeRandomly(uintptr_t ptr, PA_ALWAYS_INLINE void* TagMemoryRangeRandomly(uintptr_t ptr,
size_t size, size_t size,
uint64_t mask = 0u) { uint64_t mask = 0u) {
return reinterpret_cast<uintptr_t>( return TagMemoryRangeRandomly(reinterpret_cast<void*>(ptr), size, mask);
TagMemoryRangeRandomly(reinterpret_cast<void*>(ptr), size, mask));
} }
// Gets a version of ptr that's safe to dereference. // Gets a version of ptr that's safe to dereference.
// TODO(bartekn): Remove the T* variant.
template <typename T> template <typename T>
PA_ALWAYS_INLINE T* RemaskPtr(T* ptr) { PA_ALWAYS_INLINE T* TagPtr(T* ptr) {
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
return reinterpret_cast<T*>(global_remask_void_ptr_fn(ptr)); return reinterpret_cast<T*>(global_remask_void_ptr_fn(ptr));
#else #else
return ptr; return ptr;
#endif #endif
} }
// Gets a version of address that's safe to dereference, if cast to a pointer.
PA_ALWAYS_INLINE uintptr_t RemaskPtr(uintptr_t address) { // Gets a version of |address| that's safe to dereference, and casts to a
return reinterpret_cast<uintptr_t>( // pointer.
RemaskPtr(reinterpret_cast<void*>(address))); PA_ALWAYS_INLINE void* TagAddr(uintptr_t address) {
return TagPtr(reinterpret_cast<void*>(address));
} }
// Strips the tag bits off address. // Strips the tag bits off |address|.
PA_ALWAYS_INLINE uintptr_t UnmaskPtr(uintptr_t address) { PA_ALWAYS_INLINE uintptr_t UntagAddr(uintptr_t address) {
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
return address & kMemTagUnmask; return address & internal::kPtrUntagMask;
#else #else
return address; return address;
#endif #endif
} }
// Strips the tag bits off ptr.
// TODO(bartekn): Remove the T* variant.
template <typename T>
PA_ALWAYS_INLINE T* UnmaskPtr(T* ptr) {
return reinterpret_cast<T*>(UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
}
} // namespace internal } // namespace internal
// Strips the tag bits off |ptr|.
template <typename T>
PA_ALWAYS_INLINE uintptr_t UntagPtr(T* ptr) {
return internal::UntagAddr(reinterpret_cast<uintptr_t>(ptr));
}
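Taken together, the renamed helpers form a round trip: TagAddr() produces a dereferenceable pointer from an untagged address, and UntagPtr() recovers the address. A minimal sketch with identity stand-ins, as on a non-MTE build:

#include <cassert>
#include <cstdint>

// Identity stand-ins for the non-MTE configuration (illustration only).
void* TagAddr(uintptr_t address) { return reinterpret_cast<void*>(address); }
template <typename T>
uintptr_t UntagPtr(T* ptr) { return reinterpret_cast<uintptr_t>(ptr); }

int main() {
  uintptr_t slot_start = 0x1000;       // some untagged address
  void* object = TagAddr(slot_start);  // would gain a tag under MTE
  assert(UntagPtr(object) == slot_start);  // the pair round-trips
  return 0;
}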
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_TAGGING_H_

View File

@ -10,6 +10,7 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h" #include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -452,7 +453,8 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size, uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
internal::PartitionPageSize(), &usable_size, internal::PartitionPageSize(), &usable_size,
&already_zeroed); &already_zeroed);
ThreadCache* tcache = new (reinterpret_cast<void*>(buffer)) ThreadCache(root); ThreadCache* tcache =
new (internal::SlotStartAddr2Ptr(buffer)) ThreadCache(root);
// This may allocate. // This may allocate.
internal::PartitionTlsSet(internal::g_thread_cache_key, tcache); internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
@ -519,12 +521,16 @@ void ThreadCache::Delete(void* tcache_ptr) {
auto* root = tcache->root_; auto* root = tcache->root_;
tcache->~ThreadCache(); tcache->~ThreadCache();
root->RawFree(reinterpret_cast<uintptr_t>(tcache_ptr)); // ThreadCache was allocated using RawAlloc() and SlotStartAddr2Ptr(), so it
// isn't shifted by extras, but is MTE-tagged.
root->RawFree(internal::SlotStartPtr2Addr(tcache_ptr));
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
// On Windows, allocations do occur during thread/process teardown, make sure // On Windows, allocations do occur during thread/process teardown, make sure
// they don't resurrect the thread cache. // they don't resurrect the thread cache.
// //
// Don't MTE-tag, as it'd mess with the sentinel value.
//
// TODO(lizeb): Investigate whether this is needed on POSIX as well. // TODO(lizeb): Investigate whether this is needed on POSIX as well.
internal::PartitionTlsSet(internal::g_thread_cache_key, internal::PartitionTlsSet(internal::g_thread_cache_key,
reinterpret_cast<void*>(kTombstone)); reinterpret_cast<void*>(kTombstone));
@ -679,7 +685,7 @@ void ThreadCache::FreeAfter(internal::PartitionFreelistEntry* head,
// acquisitions can be expensive. // acquisitions can be expensive.
internal::ScopedGuard guard(root_->lock_); internal::ScopedGuard guard(root_->lock_);
while (head) { while (head) {
uintptr_t slot_start = reinterpret_cast<uintptr_t>(head); uintptr_t slot_start = internal::SlotStartPtr2Addr(head);
head = head->GetNextForThreadCache<crash_on_corruption>(slot_size); head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
root_->RawFreeLocked(slot_start); root_->RawFreeLocked(slot_start);
} }

View File

@ -10,6 +10,7 @@
#include <limits> #include <limits>
#include <memory> #include <memory>
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
@ -247,16 +248,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
#if defined(PA_THREAD_CACHE_FAST_TLS) #if defined(PA_THREAD_CACHE_FAST_TLS)
return internal::g_thread_cache; return internal::g_thread_cache;
#else #else
// This region isn't MTE-tagged.
return reinterpret_cast<ThreadCache*>( return reinterpret_cast<ThreadCache*>(
internal::PartitionTlsGet(internal::g_thread_cache_key)); internal::PartitionTlsGet(internal::g_thread_cache_key));
#endif #endif
} }
static bool IsValid(ThreadCache* tcache) { static bool IsValid(ThreadCache* tcache) {
// Do not MTE-untag, as it'd mess up the sentinel value.
return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask; return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask;
} }
static bool IsTombstone(ThreadCache* tcache) { static bool IsTombstone(ThreadCache* tcache) {
// Do not MTE-untag, as it'd mess up the sentinel value.
return reinterpret_cast<uintptr_t>(tcache) == kTombstone; return reinterpret_cast<uintptr_t>(tcache) == kTombstone;
} }
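Both checks rely on the tombstone being a bit pattern that no real, aligned ThreadCache* can have, and untagging first would destroy that property under MTE. A toy check, assuming the sentinel is the small constant 1 (its real value is defined elsewhere in this header):

#include <cstdint>

constexpr uintptr_t kTombstone = 1;  // assumed value, for illustration
constexpr uintptr_t kTombstoneMask = ~kTombstone;

// A real (aligned) pointer has bits outside the sentinel set...
static_assert((uintptr_t{0x7000} & kTombstoneMask) != 0, "IsValid");
// ...while the tombstone and nullptr do not.
static_assert((kTombstone & kTombstoneMask) == 0, "tombstone is not valid");
static_assert((uintptr_t{0} & kTombstoneMask) == 0, "nullptr is not valid");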
@ -527,14 +531,14 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
} }
PA_DCHECK(bucket.count != 0); PA_DCHECK(bucket.count != 0);
internal::PartitionFreelistEntry* result = bucket.freelist_head; internal::PartitionFreelistEntry* entry = bucket.freelist_head;
// Passes the bucket size to |GetNext()|, so that in case of freelist // Passes the bucket size to |GetNext()|, so that in case of freelist
// corruption, we know the bucket size that led to the crash, helping to // corruption, we know the bucket size that led to the crash, helping to
// narrow down the search for the culprit. |bucket| was touched just now, so this // narrow down the search for the culprit. |bucket| was touched just now, so this
// does not introduce another cache miss. // does not introduce another cache miss.
internal::PartitionFreelistEntry* next = internal::PartitionFreelistEntry* next =
result->GetNextForThreadCache<true>(bucket.slot_size); entry->GetNextForThreadCache<true>(bucket.slot_size);
PA_DCHECK(result != next); PA_DCHECK(entry != next);
bucket.count--; bucket.count--;
PA_DCHECK(bucket.count != 0 || !next); PA_DCHECK(bucket.count != 0 || !next);
bucket.freelist_head = next; bucket.freelist_head = next;
@ -542,7 +546,7 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
PA_DCHECK(cached_memory_ >= bucket.slot_size); PA_DCHECK(cached_memory_ >= bucket.slot_size);
cached_memory_ -= bucket.slot_size; cached_memory_ -= bucket.slot_size;
return reinterpret_cast<uintptr_t>(result); return internal::SlotStartPtr2Addr(entry);
} }
PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket, PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
@ -565,19 +569,23 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
static_assert(internal::kAlignment == 16, ""); static_assert(internal::kAlignment == 16, "");
#if PA_HAS_BUILTIN(__builtin_assume_aligned) #if PA_HAS_BUILTIN(__builtin_assume_aligned)
uintptr_t address = reinterpret_cast<uintptr_t>(__builtin_assume_aligned( // Cast back to uintptr_t, because we need it for pointer arithmetic. Make
reinterpret_cast<void*>(slot_start), internal::kAlignment)); // sure it gets MTE-tagged, as we cast it later to a pointer and dereference.
uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(__builtin_assume_aligned(
internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment));
#else #else
uintptr_t address = slot_start; uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(internal::SlotStartAddr2Ptr(slot_start));
#endif #endif
// The pointer is always 16 bytes aligned, so its start address is always == 0 // The pointer is always 16 bytes aligned, so its start address is always == 0
// % 16. Its distance to the next cacheline is 64 - ((address & 63) / 16) * // % 16. Its distance to the next cacheline is `64 - ((address_tagged & 63) /
// 16. // 16) * 16`.
static_assert( static_assert(
internal::kPartitionCachelineSize == 64, internal::kPartitionCachelineSize == 64,
"The computation below assumes that cache lines are 64 bytes long."); "The computation below assumes that cache lines are 64 bytes long.");
int distance_to_next_cacheline_in_16_bytes = 4 - ((address >> 4) & 3); int distance_to_next_cacheline_in_16_bytes = 4 - ((address_tagged >> 4) & 3);
int slot_size_remaining_in_16_bytes = int slot_size_remaining_in_16_bytes =
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// When BRP is on in the "previous slot" mode, this slot may have a BRP // When BRP is on in the "previous slot" mode, this slot may have a BRP
@ -591,10 +599,10 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
slot_size_remaining_in_16_bytes = std::min( slot_size_remaining_in_16_bytes = std::min(
slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes); slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes);
static const uint32_t poison_16_bytes[4] = {0xdeadbeef, 0xdeadbeef, static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
0xdeadbeef, 0xdeadbeef}; 0xbadbad00, 0xbadbad00};
uint32_t* address_aligned = reinterpret_cast<uint32_t*>(address); // Already MTE-tagged above, so safe to dereference.
uint32_t* address_aligned = reinterpret_cast<uint32_t*>(address_tagged);
for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) { for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
// Clang will expand the memcpy to a 16-byte write (movups on x86). // Clang will expand the memcpy to a 16-byte write (movups on x86).
memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes)); memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
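Worked through with concrete addresses, the arithmetic above caps the poisoning at the current cache line:

#include <cstdint>

// 64-byte lines, 16-byte-aligned slots: how many 16-byte chunks remain in
// the current cache line, starting at |address|?
constexpr int DistanceIn16Bytes(uintptr_t address) {
  return static_cast<int>(4 - ((address >> 4) & 3));
}
static_assert(DistanceIn16Bytes(0x40) == 4, "line-aligned: whole line left");
static_assert(DistanceIn16Bytes(0x70) == 1, "48 bytes in: one chunk left");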
@ -611,13 +619,4 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
} // namespace partition_alloc } // namespace partition_alloc
namespace base::internal {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::ThreadCache;
using ::partition_alloc::ThreadCacheRegistry;
} // namespace base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_

View File

@ -17,6 +17,7 @@
#include "base/bits.h" #include "base/bits.h"
#include "base/check_op.h" #include "base/check_op.h"
#include "base/numerics/safe_conversions.h"
namespace base { namespace base {
namespace allocator { namespace allocator {
@ -122,7 +123,7 @@ void* AlignAllocation(void* ptr, size_t alignment) {
// Write the prefix. // Write the prefix.
AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1; AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
prefix->original_allocation_offset = prefix->original_allocation_offset =
address - reinterpret_cast<uintptr_t>(ptr); checked_cast<unsigned int>(address - reinterpret_cast<uintptr_t>(ptr));
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
prefix->magic = AlignedPrefix::kMagic; prefix->magic = AlignedPrefix::kMagic;
#endif // DCHECK_IS_ON() #endif // DCHECK_IS_ON()
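The surrounding scheme over-allocates, rounds up to the requested alignment, and stashes the distance back to the original block just below the aligned address, so the matching free can undo the shift. A simplified sketch of both directions (the DCHECK-only magic field is omitted; names with a Sketch suffix are illustrative, not the real API):

#include <cstddef>
#include <cstdint>

struct AlignedPrefix {
  unsigned int original_allocation_offset;
};

// Round |ptr| up so a prefix fits below the returned, aligned address.
void* AlignAllocationSketch(void* ptr, size_t alignment) {
  uintptr_t address = reinterpret_cast<uintptr_t>(ptr) + sizeof(AlignedPrefix);
  address = (address + alignment - 1) & ~(alignment - 1);  // round up
  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
  prefix->original_allocation_offset =
      static_cast<unsigned int>(address - reinterpret_cast<uintptr_t>(ptr));
  return reinterpret_cast<void*>(address);
}

// Recover the pointer originally returned by the underlying allocator.
void* UnalignAllocationSketch(void* aligned) {
  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(aligned) - 1;
  return reinterpret_cast<uint8_t*>(aligned) -
         prefix->original_allocation_offset;
}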

View File

@ -29,7 +29,8 @@ int OpenApkAsset(const std::string& file_path,
CHECK_EQ(3U, results.size()); CHECK_EQ(3U, results.size());
int fd = static_cast<int>(results[0]); int fd = static_cast<int>(results[0]);
region->offset = results[1]; region->offset = results[1];
region->size = results[2]; // Not a checked_cast because open() may return -1.
region->size = static_cast<size_t>(results[2]);
return fd; return fd;
} }

View File

@ -21,11 +21,11 @@ namespace android {
namespace { namespace {
// We are leaking these strings. // We are leaking these strings.
const char* StrDupParam(const std::vector<std::string>& params, int index) { const char* StrDupParam(const std::vector<std::string>& params, size_t index) {
return strdup(params[index].c_str()); return strdup(params[index].c_str());
} }
int GetIntParam(const std::vector<std::string>& params, int index) { int GetIntParam(const std::vector<std::string>& params, size_t index) {
int ret = 0; int ret = 0;
bool success = StringToInt(params[index], &ret); bool success = StringToInt(params[index], &ret);
DCHECK(success); DCHECK(success);

View File

@ -27,7 +27,7 @@ void JNI_ChildProcessService_RegisterFileDescriptors(
const JavaParamRef<jlongArray>& j_sizes) { const JavaParamRef<jlongArray>& j_sizes) {
std::vector<absl::optional<std::string>> keys; std::vector<absl::optional<std::string>> keys;
JavaObjectArrayReader<jstring> keys_array(j_keys); JavaObjectArrayReader<jstring> keys_array(j_keys);
keys.reserve(keys_array.size()); keys.reserve(checked_cast<size_t>(keys_array.size()));
for (auto str : keys_array) { for (auto str : keys_array) {
absl::optional<std::string> key; absl::optional<std::string> key;
if (str) { if (str) {
@ -54,7 +54,7 @@ void JNI_ChildProcessService_RegisterFileDescriptors(
base::MemoryMappedFile::Region region = {offsets.at(i), base::MemoryMappedFile::Region region = {offsets.at(i),
static_cast<size_t>(sizes.at(i))}; static_cast<size_t>(sizes.at(i))};
const absl::optional<std::string>& key = keys.at(i); const absl::optional<std::string>& key = keys.at(i);
int id = ids.at(i); const auto id = static_cast<GlobalDescriptors::Key>(ids.at(i));
int fd = fds.at(i); int fd = fds.at(i);
if (key) { if (key) {
base::FileDescriptorStore::GetInstance().Set(*key, base::ScopedFD(fd), base::FileDescriptorStore::GetInstance().Set(*key, base::ScopedFD(fd),

View File

@ -15,7 +15,7 @@ jint JNI_CpuFeatures_GetCoreCount(JNIEnv*) {
} }
jlong JNI_CpuFeatures_GetCpuFeatures(JNIEnv*) { jlong JNI_CpuFeatures_GetCpuFeatures(JNIEnv*) {
return android_getCpuFeatures(); return static_cast<jlong>(android_getCpuFeatures());
} }
} // namespace android } // namespace android

View File

@ -106,7 +106,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncBeginEvent(
std::string name = ConvertJavaStringToUTF8(env, jname); std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0( TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(), TRACE_ID_LOCAL(id), internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
TimeTicks::FromJavaNanoTime(time_ns), TimeTicks::FromJavaNanoTime(time_ns),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY); TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
} }
@ -119,7 +120,8 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncEndEvent(
std::string name = ConvertJavaStringToUTF8(env, jname); std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0( TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(), TRACE_ID_LOCAL(id), internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
TimeTicks::FromJavaNanoTime(time_ns), TimeTicks::FromJavaNanoTime(time_ns),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY); TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
} }

View File

@ -23,7 +23,9 @@ namespace {
void AddFrameToTrace(int64_t timestamp_ns, int64_t durations_ns) { void AddFrameToTrace(int64_t timestamp_ns, int64_t durations_ns) {
#if BUILDFLAG(ENABLE_BASE_TRACING) #if BUILDFLAG(ENABLE_BASE_TRACING)
auto t = perfetto::Track(timestamp_ns); if (timestamp_ns < 0)
return;
auto t = perfetto::Track(static_cast<uint64_t>(timestamp_ns));
TRACE_EVENT_BEGIN( TRACE_EVENT_BEGIN(
"ui", "AndroidFrameVsync", t, [&](perfetto::EventContext ctx) { "ui", "AndroidFrameVsync", t, [&](perfetto::EventContext ctx) {
ctx.event()->set_timestamp_absolute_us(timestamp_ns / 1000); ctx.event()->set_timestamp_absolute_us(timestamp_ns / 1000);
@ -75,7 +77,7 @@ void RecordJankMetrics(
std::string missed_frames_histogram_name = std::string missed_frames_histogram_name =
base::StrCat({"Android.Jank.MissedFrames.", scenario_name}); base::StrCat({"Android.Jank.MissedFrames.", scenario_name});
for (unsigned i = 0; i < timestamps_ns.size(); ++i) { for (size_t i = 0; i < timestamps_ns.size(); ++i) {
AddFrameToTrace(timestamps_ns[i], durations_ns[i]); AddFrameToTrace(timestamps_ns[i], durations_ns[i]);
} }

View File

@ -26,7 +26,8 @@ void (*g_java_exception_callback)(const char*);
using JavaExceptionFilter = using JavaExceptionFilter =
base::RepeatingCallback<bool(const JavaRef<jthrowable>&)>; base::RepeatingCallback<bool(const JavaRef<jthrowable>&)>;
LazyInstance<JavaExceptionFilter>::Leaky g_java_exception_filter; LazyInstance<JavaExceptionFilter>::Leaky g_java_exception_filter =
LAZY_INSTANCE_INITIALIZER;
} // namespace } // namespace

View File

@ -26,13 +26,13 @@ namespace base {
namespace android { namespace android {
JavaHandlerThread::JavaHandlerThread(const char* name, JavaHandlerThread::JavaHandlerThread(const char* name,
base::ThreadPriority priority) base::ThreadType thread_type)
: JavaHandlerThread( : JavaHandlerThread(
name, name,
Java_JavaHandlerThread_create( Java_JavaHandlerThread_create(
AttachCurrentThread(), AttachCurrentThread(),
ConvertUTF8ToJavaString(AttachCurrentThread(), name), ConvertUTF8ToJavaString(AttachCurrentThread(), name),
base::internal::ThreadPriorityToNiceValue(priority))) {} base::internal::ThreadTypeToNiceValue(thread_type))) {}
JavaHandlerThread::JavaHandlerThread( JavaHandlerThread::JavaHandlerThread(
const char* name, const char* name,

View File

@ -33,7 +33,7 @@ class BASE_EXPORT JavaHandlerThread {
// Create new thread. // Create new thread.
explicit JavaHandlerThread( explicit JavaHandlerThread(
const char* name, const char* name,
base::ThreadPriority priority = base::ThreadPriority::NORMAL); base::ThreadType thread_type = base::ThreadType::kDefault);
// Wrap and connect to an existing JavaHandlerThread. // Wrap and connect to an existing JavaHandlerThread.
// |obj| is an instance of JavaHandlerThread. // |obj| is an instance of JavaHandlerThread.
explicit JavaHandlerThread( explicit JavaHandlerThread(

View File

@ -5,16 +5,20 @@
#include "base/android/java_runtime.h" #include "base/android/java_runtime.h"
#include "base/android_runtime_jni_headers/Runtime_jni.h" #include "base/android_runtime_jni_headers/Runtime_jni.h"
#include "base/numerics/safe_conversions.h"
namespace base { namespace base {
namespace android { namespace android {
void JavaRuntime::GetMemoryUsage(long* total_memory, long* free_memory) { void JavaRuntime::GetMemoryUsage(uint64_t* total_memory,
uint64_t* free_memory) {
JNIEnv* env = base::android::AttachCurrentThread(); JNIEnv* env = base::android::AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobject> runtime = base::android::ScopedJavaLocalRef<jobject> runtime =
JNI_Runtime::Java_Runtime_getRuntime(env); JNI_Runtime::Java_Runtime_getRuntime(env);
*total_memory = JNI_Runtime::Java_Runtime_totalMemory(env, runtime); *total_memory = checked_cast<uint64_t>(
*free_memory = JNI_Runtime::Java_Runtime_freeMemory(env, runtime); JNI_Runtime::Java_Runtime_totalMemory(env, runtime));
*free_memory = checked_cast<uint64_t>(
JNI_Runtime::Java_Runtime_freeMemory(env, runtime));
} }
} // namespace android } // namespace android

View File

@ -16,7 +16,7 @@ class BASE_EXPORT JavaRuntime {
public: public:
// Fills the total memory used and memory allocated for objects by the java // Fills the total memory used and memory allocated for objects by the java
// heap in the current process. // heap in the current process.
static void GetMemoryUsage(long* total_memory, long* free_memory); static void GetMemoryUsage(uint64_t* total_memory, uint64_t* free_memory);
}; };
} // namespace android } // namespace android

View File

@ -9,6 +9,7 @@
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/android/jni_string.h" #include "base/android/jni_string.h"
#include "base/check_op.h" #include "base/check_op.h"
#include "base/numerics/safe_conversions.h"
namespace base { namespace base {
namespace android { namespace android {
@ -30,11 +31,12 @@ size_t SafeGetArrayLength(JNIEnv* env, const JavaRef<JavaArrayType>& jarray) {
ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env, ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
const uint8_t* bytes, const uint8_t* bytes,
size_t len) { size_t len) {
jbyteArray byte_array = env->NewByteArray(len); const jsize len_jsize = checked_cast<jsize>(len);
jbyteArray byte_array = env->NewByteArray(len_jsize);
CheckException(env); CheckException(env);
DCHECK(byte_array); DCHECK(byte_array);
env->SetByteArrayRegion(byte_array, 0, len, env->SetByteArrayRegion(byte_array, 0, len_jsize,
reinterpret_cast<const jbyte*>(bytes)); reinterpret_cast<const jbyte*>(bytes));
CheckException(env); CheckException(env);
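The same pattern repeats for every array type below. jsize is a 32-bit jint, so the previous implicit conversion would quietly truncate an oversized size_t; checked_cast turns that into a deterministic crash instead. A sketch, with jsize spelled out locally rather than taken from jni.h:

#include <cstddef>
#include <cstdint>
#include "base/numerics/safe_conversions.h"

using jsize = int32_t;  // matches jni.h, where jsize is a jint

jsize ToJsizeOrCrash(size_t len) {
  // OK for len <= INT32_MAX; CHECK-fails instead of truncating beyond that.
  return base::checked_cast<jsize>(len);
}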
@ -56,11 +58,12 @@ ScopedJavaLocalRef<jbyteArray> ToJavaByteArray(JNIEnv* env,
ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env, ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
const bool* bools, const bool* bools,
size_t len) { size_t len) {
jbooleanArray boolean_array = env->NewBooleanArray(len); const jsize len_jsize = checked_cast<jsize>(len);
jbooleanArray boolean_array = env->NewBooleanArray(len_jsize);
CheckException(env); CheckException(env);
DCHECK(boolean_array); DCHECK(boolean_array);
env->SetBooleanArrayRegion(boolean_array, 0, len, env->SetBooleanArrayRegion(boolean_array, 0, len_jsize,
reinterpret_cast<const jboolean*>(bools)); reinterpret_cast<const jboolean*>(bools));
CheckException(env); CheckException(env);
@ -70,11 +73,12 @@ ScopedJavaLocalRef<jbooleanArray> ToJavaBooleanArray(JNIEnv* env,
ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env, ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
const int* ints, const int* ints,
size_t len) { size_t len) {
jintArray int_array = env->NewIntArray(len); const jsize len_jsize = checked_cast<jsize>(len);
jintArray int_array = env->NewIntArray(len_jsize);
CheckException(env); CheckException(env);
DCHECK(int_array); DCHECK(int_array);
env->SetIntArrayRegion(int_array, 0, len, env->SetIntArrayRegion(int_array, 0, len_jsize,
reinterpret_cast<const jint*>(ints)); reinterpret_cast<const jint*>(ints));
CheckException(env); CheckException(env);
@ -89,11 +93,12 @@ ScopedJavaLocalRef<jintArray> ToJavaIntArray(JNIEnv* env,
ScopedJavaLocalRef<jlongArray> ToJavaLongArray(JNIEnv* env, ScopedJavaLocalRef<jlongArray> ToJavaLongArray(JNIEnv* env,
const int64_t* longs, const int64_t* longs,
size_t len) { size_t len) {
jlongArray long_array = env->NewLongArray(len); const jsize len_jsize = checked_cast<jsize>(len);
jlongArray long_array = env->NewLongArray(len_jsize);
CheckException(env); CheckException(env);
DCHECK(long_array); DCHECK(long_array);
env->SetLongArrayRegion(long_array, 0, len, env->SetLongArrayRegion(long_array, 0, len_jsize,
reinterpret_cast<const jlong*>(longs)); reinterpret_cast<const jlong*>(longs));
CheckException(env); CheckException(env);
@ -110,11 +115,12 @@ BASE_EXPORT ScopedJavaLocalRef<jlongArray> ToJavaLongArray(
// Returns a new Java float array converted from the given C++ float array. // Returns a new Java float array converted from the given C++ float array.
BASE_EXPORT ScopedJavaLocalRef<jfloatArray> BASE_EXPORT ScopedJavaLocalRef<jfloatArray>
ToJavaFloatArray(JNIEnv* env, const float* floats, size_t len) { ToJavaFloatArray(JNIEnv* env, const float* floats, size_t len) {
jfloatArray float_array = env->NewFloatArray(len); const jsize len_jsize = checked_cast<jsize>(len);
jfloatArray float_array = env->NewFloatArray(len_jsize);
CheckException(env); CheckException(env);
DCHECK(float_array); DCHECK(float_array);
-  env->SetFloatArrayRegion(float_array, 0, len,
+  env->SetFloatArrayRegion(float_array, 0, len_jsize,
                            reinterpret_cast<const jfloat*>(floats));
   CheckException(env);
@@ -129,11 +135,12 @@ BASE_EXPORT ScopedJavaLocalRef<jfloatArray> ToJavaFloatArray(
 BASE_EXPORT ScopedJavaLocalRef<jdoubleArray>
 ToJavaDoubleArray(JNIEnv* env, const double* doubles, size_t len) {
-  jdoubleArray double_array = env->NewDoubleArray(len);
+  const jsize len_jsize = checked_cast<jsize>(len);
+  jdoubleArray double_array = env->NewDoubleArray(len_jsize);
   CheckException(env);
   DCHECK(double_array);
-  env->SetDoubleArrayRegion(double_array, 0, len,
+  env->SetDoubleArrayRegion(double_array, 0, len_jsize,
                             reinterpret_cast<const jdouble*>(doubles));
   CheckException(env);
@@ -150,11 +157,12 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
     JNIEnv* env,
     ScopedJavaLocalRef<jclass> clazz,
     base::span<const ScopedJavaLocalRef<jobject>> v) {
-  jobjectArray joa = env->NewObjectArray(v.size(), clazz.obj(), nullptr);
+  jobjectArray joa =
+      env->NewObjectArray(checked_cast<jsize>(v.size()), clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
-    env->SetObjectArrayElement(joa, i, v[i].obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -170,12 +178,12 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
     base::span<const ScopedJavaGlobalRef<jobject>> v) {
   ScopedJavaLocalRef<jclass> object_array_clazz =
       GetClass(env, "java/lang/Object");
-  jobjectArray joa =
-      env->NewObjectArray(v.size(), object_array_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
+                                         object_array_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
-    env->SetObjectArrayElement(joa, i, v[i].obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -184,11 +192,12 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaLocalRef<jobject>> v,
     ScopedJavaLocalRef<jclass> type) {
-  jobjectArray joa = env->NewObjectArray(v.size(), type.obj(), nullptr);
+  jobjectArray joa =
+      env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
-    env->SetObjectArrayElement(joa, i, v[i].obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -197,11 +206,12 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
     JNIEnv* env,
     base::span<const ScopedJavaGlobalRef<jobject>> v,
     ScopedJavaLocalRef<jclass> type) {
-  jobjectArray joa = env->NewObjectArray(v.size(), type.obj(), nullptr);
+  jobjectArray joa =
+      env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
-    env->SetObjectArrayElement(joa, i, v[i].obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), v[i].obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -210,14 +220,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
     JNIEnv* env,
     base::span<const std::string> v) {
   ScopedJavaLocalRef<jclass> byte_array_clazz = GetClass(env, "[B");
-  jobjectArray joa =
-      env->NewObjectArray(v.size(), byte_array_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
+                                         byte_array_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
     ScopedJavaLocalRef<jbyteArray> byte_array = ToJavaByteArray(
         env, reinterpret_cast<const uint8_t*>(v[i].data()), v[i].length());
-    env->SetObjectArrayElement(joa, i, byte_array.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -226,14 +236,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(
     JNIEnv* env,
     base::span<const std::vector<uint8_t>> v) {
   ScopedJavaLocalRef<jclass> byte_array_clazz = GetClass(env, "[B");
-  jobjectArray joa =
-      env->NewObjectArray(v.size(), byte_array_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
+                                         byte_array_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
     ScopedJavaLocalRef<jbyteArray> byte_array =
         ToJavaByteArray(env, v[i].data(), v[i].size());
-    env->SetObjectArrayElement(joa, i, byte_array.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), byte_array.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -242,12 +252,13 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
     JNIEnv* env,
     base::span<const std::string> v) {
   ScopedJavaLocalRef<jclass> string_clazz = GetClass(env, "java/lang/String");
-  jobjectArray joa = env->NewObjectArray(v.size(), string_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
+                                         string_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
     ScopedJavaLocalRef<jstring> item = ConvertUTF8ToJavaString(env, v[i]);
-    env->SetObjectArrayElement(joa, i, item.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -258,14 +269,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
   ScopedJavaLocalRef<jclass> string_array_clazz =
       GetClass(env, "[Ljava/lang/String;");
-  jobjectArray joa =
-      env->NewObjectArray(vec_outer.size(), string_array_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(vec_outer.size()),
+                                         string_array_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < vec_outer.size(); ++i) {
     ScopedJavaLocalRef<jobjectArray> inner =
         ToJavaArrayOfStrings(env, vec_outer[i]);
-    env->SetObjectArrayElement(joa, i, inner.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
@@ -277,14 +288,14 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStringArray(
   ScopedJavaLocalRef<jclass> string_array_clazz =
       GetClass(env, "[Ljava/lang/String;");
-  jobjectArray joa =
-      env->NewObjectArray(vec_outer.size(), string_array_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(vec_outer.size()),
+                                         string_array_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < vec_outer.size(); ++i) {
     ScopedJavaLocalRef<jobjectArray> inner =
         ToJavaArrayOfStrings(env, vec_outer[i]);
-    env->SetObjectArrayElement(joa, i, inner.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), inner.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
@@ -294,12 +305,13 @@ ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfStrings(
     JNIEnv* env,
     base::span<const std::u16string> v) {
   ScopedJavaLocalRef<jclass> string_clazz = GetClass(env, "java/lang/String");
-  jobjectArray joa = env->NewObjectArray(v.size(), string_clazz.obj(), nullptr);
+  jobjectArray joa = env->NewObjectArray(checked_cast<jsize>(v.size()),
+                                         string_clazz.obj(), nullptr);
   CheckException(env);
   for (size_t i = 0; i < v.size(); ++i) {
     ScopedJavaLocalRef<jstring> item = ConvertUTF16ToJavaString(env, v[i]);
-    env->SetObjectArrayElement(joa, i, item.obj());
+    env->SetObjectArrayElement(joa, static_cast<jsize>(i), item.obj());
   }
   return ScopedJavaLocalRef<jobjectArray>(env, joa);
 }
@@ -315,7 +327,8 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
   out->resize(back + len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jstring> str(
-        env, static_cast<jstring>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jstring>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     ConvertJavaStringToUTF16(env, str.obj(), out->data() + back + i);
   }
 }
@@ -331,7 +344,8 @@ void AppendJavaStringArrayToStringVector(JNIEnv* env,
   out->resize(back + len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jstring> str(
-        env, static_cast<jstring>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jstring>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     ConvertJavaStringToUTF8(env, str.obj(), out->data() + back + i);
   }
 }
@@ -347,7 +361,7 @@ void AppendJavaByteArrayToByteVector(JNIEnv* env,
     return;
   size_t back = out->size();
   out->resize(back + len);
-  env->GetByteArrayRegion(byte_array.obj(), 0, len,
+  env->GetByteArrayRegion(byte_array.obj(), 0, static_cast<jsize>(len),
                           reinterpret_cast<int8_t*>(out->data() + back));
 }
@@ -397,7 +411,8 @@ void JavaIntArrayToIntVector(JNIEnv* env,
   out->resize(len);
   if (!len)
     return;
-  env->GetIntArrayRegion(int_array.obj(), 0, len, out->data());
+  env->GetIntArrayRegion(int_array.obj(), 0, static_cast<jsize>(len),
+                         out->data());
 }

 void JavaLongArrayToInt64Vector(JNIEnv* env,
@@ -418,7 +433,8 @@ void JavaLongArrayToLongVector(JNIEnv* env,
   out->resize(len);
   if (!len)
     return;
-  env->GetLongArrayRegion(long_array.obj(), 0, len, out->data());
+  env->GetLongArrayRegion(long_array.obj(), 0, static_cast<jsize>(len),
+                          out->data());
 }

 void JavaFloatArrayToFloatVector(JNIEnv* env,
@@ -429,7 +445,8 @@ void JavaFloatArrayToFloatVector(JNIEnv* env,
   out->resize(len);
   if (!len)
     return;
-  env->GetFloatArrayRegion(float_array.obj(), 0, len, out->data());
+  env->GetFloatArrayRegion(float_array.obj(), 0, static_cast<jsize>(len),
+                           out->data());
 }

 void JavaDoubleArrayToDoubleVector(JNIEnv* env,
@@ -440,7 +457,8 @@ void JavaDoubleArrayToDoubleVector(JNIEnv* env,
   out->resize(len);
   if (!len)
     return;
-  env->GetDoubleArrayRegion(double_array.obj(), 0, len, out->data());
+  env->GetDoubleArrayRegion(double_array.obj(), 0, static_cast<jsize>(len),
+                            out->data());
 }

 void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
@@ -451,9 +469,9 @@ void JavaArrayOfByteArrayToStringVector(JNIEnv* env,
   out->resize(len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jbyteArray> bytes_array(
-        env,
-        static_cast<jbyteArray>(env->GetObjectArrayElement(array.obj(), i)));
-    jsize bytes_len = env->GetArrayLength(bytes_array.obj());
+        env, static_cast<jbyteArray>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
+    size_t bytes_len = SafeGetArrayLength(env, bytes_array);
     jbyte* bytes = env->GetByteArrayElements(bytes_array.obj(), nullptr);
     (*out)[i].assign(reinterpret_cast<const char*>(bytes), bytes_len);
     env->ReleaseByteArrayElements(bytes_array.obj(), bytes, JNI_ABORT);
@@ -468,8 +486,8 @@ void JavaArrayOfByteArrayToBytesVector(JNIEnv* env,
   out->resize(len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jbyteArray> bytes_array(
-        env,
-        static_cast<jbyteArray>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jbyteArray>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     JavaByteArrayToByteVector(env, bytes_array, &(*out)[i]);
   }
 }
@@ -483,8 +501,8 @@ void Java2dStringArrayTo2dStringVector(
   out->resize(len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jobjectArray> strings_array(
-        env,
-        static_cast<jobjectArray>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jobjectArray>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     out->at(i).clear();
     AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
@@ -500,8 +518,8 @@ void Java2dStringArrayTo2dStringVector(
   out->resize(len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jobjectArray> strings_array(
-        env,
-        static_cast<jobjectArray>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jobjectArray>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     out->at(i).clear();
     AppendJavaStringArrayToStringVector(env, strings_array, &out->at(i));
@@ -516,8 +534,8 @@ void JavaArrayOfIntArrayToIntVector(JNIEnv* env,
   out->resize(len);
   for (size_t i = 0; i < len; ++i) {
     ScopedJavaLocalRef<jintArray> int_array(
-        env,
-        static_cast<jintArray>(env->GetObjectArrayElement(array.obj(), i)));
+        env, static_cast<jintArray>(env->GetObjectArrayElement(
+                 array.obj(), static_cast<jsize>(i))));
     JavaIntArrayToIntVector(env, int_array, &out->at(i));
   }
 }
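All of the hunks above make the same hardening change: JNI's jsize is a signed 32-bit integer, while size_t and the loop indices are unsigned (and 64-bit on most platforms), so the old implicit conversions could silently truncate or wrap. Below is a minimal sketch of the idea behind the new checked_cast<jsize> calls, assuming jsize is a 32-bit int; it illustrates the technique and is not Chromium's actual implementation from base/numerics/safe_conversions.h:

#include <cstdint>
#include <cstdlib>
#include <limits>

using jsize = int32_t;  // the JNI spec defines jsize as a signed 32-bit int

// Rough stand-in for base::checked_cast<jsize>: fail fast on overflow
// instead of letting a large size_t wrap around to a negative jsize.
jsize CheckedJsize(size_t len) {
  if (len > static_cast<size_t>(std::numeric_limits<jsize>::max()))
    std::abort();  // base::checked_cast would CHECK-fail here instead
  return static_cast<jsize>(len);
}

With a guard like this, converting a container of more than INT32_MAX elements crashes at the cast rather than asking the JVM for an array with a negative length; the SafeGetArrayLength swap in JavaArrayOfByteArrayToStringVector guards the opposite direction, sanitizing the signed length returned by GetArrayLength before it is used as a size_t.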

View File

@@ -99,6 +99,5 @@ java_annotation_processor("jni_processor") {
     "//third_party/android_deps:com_google_auto_service_auto_service_annotations_java",
     "//third_party/android_deps:com_google_guava_guava_java",
     "//third_party/android_deps:com_squareup_javapoet_java",
-    "//third_party/android_deps:javax_annotation_jsr250_api_java",
  ]
}

View File

@ -20,15 +20,17 @@ def CommonChecks(input_api, output_api):
'PYTHONDONTWRITEBYTECODE': '1', 'PYTHONDONTWRITEBYTECODE': '1',
}) })
return input_api.canned_checks.RunUnitTests( return input_api.RunTests(
input_api.canned_checks.GetUnitTests(
input_api, input_api,
output_api, output_api,
run_on_python2=False,
unit_tests=[ unit_tests=[
input_api.os_path.join(base_android_jni_generator_dir, input_api.os_path.join(base_android_jni_generator_dir,
'jni_generator_tests.py') 'jni_generator_tests.py')
], ],
env=env, env=env,
) ))
def CheckChangeOnUpload(input_api, output_api): def CheckChangeOnUpload(input_api, output_api):

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -11,8 +11,6 @@ code generator and ensures the output matches a golden
 file.
 """

-from __future__ import print_function
-
 import collections
 import difflib
 import inspect

View File

@ -6,6 +6,7 @@
#include "base/android/jni_android.h" #include "base/android/jni_android.h"
#include "base/logging.h" #include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/utf_string_conversions.h" #include "base/strings/utf_string_conversions.h"
namespace { namespace {
@ -13,8 +14,8 @@ namespace {
// Internal version that does not use a scoped local pointer. // Internal version that does not use a scoped local pointer.
jstring ConvertUTF16ToJavaStringImpl(JNIEnv* env, jstring ConvertUTF16ToJavaStringImpl(JNIEnv* env,
const base::StringPiece16& str) { const base::StringPiece16& str) {
jstring result = jstring result = env->NewString(reinterpret_cast<const jchar*>(str.data()),
env->NewString(reinterpret_cast<const jchar*>(str.data()), str.length()); base::checked_cast<jsize>(str.length()));
base::android::CheckException(env); base::android::CheckException(env);
return result; return result;
} }
@ -32,7 +33,7 @@ void ConvertJavaStringToUTF8(JNIEnv* env, jstring str, std::string* result) {
return; return;
} }
const jsize length = env->GetStringLength(str); const jsize length = env->GetStringLength(str);
if (!length) { if (length <= 0) {
result->clear(); result->clear();
CheckException(env); CheckException(env);
return; return;
@ -42,7 +43,8 @@ void ConvertJavaStringToUTF8(JNIEnv* env, jstring str, std::string* result) {
// function that yields plain (non Java-modified) UTF8. // function that yields plain (non Java-modified) UTF8.
const jchar* chars = env->GetStringChars(str, NULL); const jchar* chars = env->GetStringChars(str, NULL);
DCHECK(chars); DCHECK(chars);
UTF16ToUTF8(reinterpret_cast<const char16_t*>(chars), length, result); UTF16ToUTF8(reinterpret_cast<const char16_t*>(chars),
static_cast<size_t>(length), result);
env->ReleaseStringChars(str, chars); env->ReleaseStringChars(str, chars);
CheckException(env); CheckException(env);
} }
@ -84,7 +86,7 @@ void ConvertJavaStringToUTF16(JNIEnv* env,
return; return;
} }
const jsize length = env->GetStringLength(str); const jsize length = env->GetStringLength(str);
if (!length) { if (length <= 0) {
result->clear(); result->clear();
CheckException(env); CheckException(env);
return; return;
@ -93,7 +95,8 @@ void ConvertJavaStringToUTF16(JNIEnv* env,
DCHECK(chars); DCHECK(chars);
// GetStringChars isn't required to NULL-terminate the strings // GetStringChars isn't required to NULL-terminate the strings
// it returns, so the length must be explicitly checked. // it returns, so the length must be explicitly checked.
result->assign(reinterpret_cast<const char16_t*>(chars), length); result->assign(reinterpret_cast<const char16_t*>(chars),
static_cast<size_t>(length));
env->ReleaseStringChars(str, chars); env->ReleaseStringChars(str, chars);
CheckException(env); CheckException(env);
} }
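These string hunks pair the same checked cast with a second defensive tweak: GetStringLength returns a signed jsize, and the old `if (!length)` only caught zero, so a negative value would have flowed into an unsigned length parameter and wrapped to an enormous size. A short sketch of the pattern follows; AssignUtf16 is a hypothetical helper for illustration, not Chromium code:

#include <cstdint>
#include <string>

using jsize = int32_t;  // assumption: jsize is a signed 32-bit int

// Hypothetical helper mirroring the ConvertJavaStringToUTF16 logic:
// treat any non-positive length as empty instead of casting it blindly.
void AssignUtf16(const char16_t* chars, jsize length, std::u16string* out) {
  if (length <= 0) {  // a negative jsize would wrap to a huge size_t
    out->clear();
    return;
  }
  out->assign(chars, static_cast<size_t>(length));
}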

View File

@@ -1,30 +0,0 @@
-# Copyright 2020 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-USE_PYTHON3 = True
-
-
-def CheckChangeOnUpload(input_api, output_api):
-  results = []
-  # Dictionary of base/android files with corresponding Robolectric shadows.
-  # If new functions are added to the original file, it is very likely that
-  # function with the same signature should be added to the shadow.
-  impl_to_shadow_paths = {
-      'base/android/java/src/org/'
-      'chromium/base/metrics/RecordHistogram.java':
-          'base/android/junit/src/org/'
-          'chromium/base/metrics/test/ShadowRecordHistogram.java'
-  }
-
-  for impl_path, shadow_path in impl_to_shadow_paths.items():
-    if impl_path in input_api.change.LocalPaths():
-      if shadow_path not in input_api.change.LocalPaths():
-        results.append(
-            output_api.PresubmitPromptWarning(
-                'You modified the runtime class:\n  {}\n'
-                'without changing the corresponding shadow test class'
-                ':\n  {}\n').format(impl_path, shadow_path))
-
-  return results

Some files were not shown because too many files have changed in this diff.