Mirror of https://github.com/klzgrad/naiveproxy.git (synced 2024-11-23 22:06:12 +03:00)

Compare commits: 676417d610...43d9635938

No commits in common. "676417d610798e47d16d439f046fc57e71baa4dc" and "43d9635938309c9dd331cfb8302a840e2818a9a7" have entirely different histories.
.github/workflows/build.yml (vendored, 2 changes)

@@ -123,7 +123,7 @@ jobs:
       - name: Cache sysroot
         uses: actions/cache@v2
         with:
-          path: src/out/sysroot-build/sid/sid_*
+          path: src/out/sysroot-build/bullseye/bullseye_*
           key: sysroot-linux-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-v${{ env.CACHE_EPOCH }}
       - id: ccache-timestamp
         run: echo "::set-output name=date::$(date +%s)"
@@ -1 +1 @@
-101.0.4951.41
+102.0.5005.61
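The cache step above keys the Debian sysroot on the pinned Chromium version (hashFiles('CHROMIUM_VERSION')) plus a manual CACHE_EPOCH escape hatch, so the one-line version bump shown above (101.0.4951.41 → 102.0.5005.61, presumably the CHROMIUM_VERSION file the key hashes) automatically invalidates the old sid sysroot when the pin moves to the bullseye paths. A minimal sketch of that keying logic (illustrative Python, not part of the repo):

```python
import hashlib

def sysroot_cache_key(arch: str, chromium_version_file: str, cache_epoch: str) -> str:
    """Mimics the actions/cache key above: any change to the pinned
    Chromium version or to CACHE_EPOCH yields a new key, forcing a
    fresh sysroot build instead of a stale cache hit."""
    with open(chromium_version_file, "rb") as f:
        version_hash = hashlib.sha256(f.read()).hexdigest()
    return f"sysroot-linux-{arch}-{version_hash}-v{cache_epoch}"

# Example: the key changes whenever CHROMIUM_VERSION's contents change.
print(sysroot_cache_key("x64", "CHROMIUM_VERSION", "1"))
```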
src/.gn (6 changes)

@@ -82,6 +82,12 @@ no_check_targets = [
   "//v8:cppgc_base",  # 1 error
   "//v8:v8_internal_headers",  # 11 errors
   "//v8:v8_libplatform",  # 2 errors
+
+  # After making partition_alloc a standalone library, remove partition_alloc
+  # target from the skip list, because partition_aloc will depend on its own
+  # base.
+  # partition alloc standalone library bug is https://crbug.com/1151236.
+  "//base/allocator/partition_allocator:partition_alloc",  # 292 errors
 ]

 # These are the list of GN files that run exec_script. This whitelist exists
src/AUTHORS (14 changes)

@@ -92,6 +92,7 @@ anatoly techtonik <techtonik@gmail.com>
 Ancil George <ancilgeorge@samsung.com>
 Andra Paraschiv <andra.paraschiv@intel.com>
 Andras Tokodi <a.tokodi@eyeo.com>
+Andreas Nazlidis <andreas221b@gmail.com>
 Andreas Papacharalampous <andreas@apap04.com>
 Andrei Borza <andrei.borza@gmail.com>
 Andrei Parvu <andrei.prv@gmail.com>
@@ -115,11 +116,13 @@ Anne Kao <annekao94@gmail.com>
 Anshul Jain <anshul.jain@samsung.com>
 Anssi Hannula <anssi.hannula@iki.fi>
 Anthony Halliday <anth.halliday12@gmail.com>
-Anton Bershanskiy <bershanskiy@pm.me>
+Anton Bershanskiy <8knots@protonmail.com>
 Anton Obzhirov <a.obzhirov@samsung.com>
 Antonin Hildebrand <antonin.hildebrand@gmail.com>
 Antonio Gomes <a1.gomes@sisa.samsung.com>
 Anuj Kumar Sharma <anujk.sharma@samsung.com>
+Ao Sun <ntusunao@gmail.com>
+Ao Wang <wangao.james@bytedance.com>
 Arjun Karthik <arjunkar@amazon.com>
 Arman Ghotb <armanghotb@gmail.com>
 Armin Burgmeier <aburgmeier@bloomberg.net>
@@ -248,6 +251,7 @@ Daniel Bomar <dbdaniel42@gmail.com>
 Daniel Carvalho Liedke <dliedke@gmail.com>
 Daniel Charles <daniel.charles@intel.com>
 Daniel Imms <daniimms@amazon.com>
+Daniel Izquierdo <daniel.izquierdo@gmail.com>
 Daniel Johnson <danielj41@gmail.com>
 Daniel Lockyer <thisisdaniellockyer@gmail.com>
 Daniel Nishi <dhnishi@gmail.com>
@@ -349,6 +353,7 @@ Evgeniy Dushistov <dushistov@gmail.com>
 Evgeny Agafonchikov <evgeny.agafonchikov@akvelon.com>
 Fabian Henneke <fabian.henneke@gmail.com>
 Fabien Tassin <fta@sofaraway.org>
+Feifei Wang <alexswang@tencent.com>
 Felipe Erias Morandeira <felipeerias@gmail.com>
 Felix H. Dahlke <fhd@ubercode.de>
 Felix Weilbach <feweilbach@gmail.com>
@@ -576,6 +581,7 @@ Jongsoo Lee <leejongsoo@gmail.com>
 Joone Hur <joone.hur@intel.com>
 Joonghun Park <pjh0718@gmail.com>
 Jorge Villatoro <jorge@tomatocannon.com>
+Jorrit Jongma <jorrit@jongma.org>
 Joseph Gentle <josephg@gmail.com>
 Joseph Lolak <joseph.lolak@samsung.com>
 Josh Triplett <josh.triplett@intel.com>
@@ -713,6 +719,7 @@ Luke Seunghoe Gu <gulukesh@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Luoxi Pan <l.panpax@gmail.com>
 Lu Yahan <yahan@iscas.ac.cn>
+Ma Aiguo <maaiguo@uniontech.com>
 Maarten Lankhorst <m.b.lankhorst@gmail.com>
 Maciej Pawlowski <m.pawlowski@eyeo.com>
 Magnus Danielsson <fuzzac@gmail.com>
@@ -949,6 +956,7 @@ Ravi Nanjundappa <nravi.n@samsung.com>
 Ravi Phaneendra Kasibhatla <r.kasibhatla@samsung.com>
 Ravi Phaneendra Kasibhatla <ravi.kasibhatla@motorola.com>
 Raviraj Sitaram <raviraj.p.sitaram@intel.com>
+Rebecca Chang Swee Fun <rebecca.chang@starfivetech.com>
 Reda Tawfik <redatawfik@noogler.google.com>
 Réda Housni Alaoui <alaoui.rda@gmail.com>
 Refael Ackermann <refack@gmail.com>
@@ -1159,6 +1167,7 @@ Tibor Dusnoki <tibor.dusnoki.91@gmail.com>
 Tibor Dusnoki <tdusnoki@inf.u-szeged.hu>
 Tim Ansell <mithro@mithis.com>
 Tim Niederhausen <tim@rnc-ag.de>
+Tim Steiner <twsteiner@gmail.com>
 Timo Gurr <timo.gurr@gmail.com>
 Timo Reimann <ttr314@googlemail.com>
 Timo Witte <timo.witte@gmail.com>
@@ -1233,6 +1242,7 @@ Xiaolei Yu <dreifachstein@gmail.com>
 Xiaoshu Zhang <xiaoshu@amazon.com>
 Xiaoyin Liu <xiaoyin.l@outlook.com>
 Xinchao He <hexinchao@gmail.com>
+Xinchao Tian <tianxinchao@360.cn>
 Xing Zhang <xzhang@adobe.com>
 Xinghua Cao <xinghua.cao@intel.com>
 Xu Samuel <samuel.xu@intel.com>
@@ -1246,6 +1256,7 @@ Xunran Ding <dingxunran@gmail.com>
 Yael Aharon <yael.aharon@intel.com>
 Yan Wang <yan0422.wang@samsung.com>
 Yang Gu <yang.gu@intel.com>
+Yang Liu <jd9668954@gmail.com>
 Yannic Bonenberger <yannic.bonenberger@gmail.com>
 Yarin Kaul <yarin.kaul@gmail.com>
 Yash Vempati <vempatiy@amazon.com>
@@ -1260,6 +1271,7 @@ Yi Zhang <yi.y.zhang@intel.com>
 Yizhou Jiang <yizhou.jiang@intel.com>
 Yoav Weiss <yoav@yoav.ws>
 Yoav Zilberberg <yoav.zilberberg@gmail.com>
+Yong Ling <yongling@tencent.com>
 Yong Shin <sy3620@gmail.com>
 Yong Wang <ccyongwang@tencent.com>
 Yongha Lee <yongha78.lee@samsung.com>
src/DEPS (246 changes)

@@ -105,10 +105,10 @@ vars = {
   # be overridden by gclient variables.
   'checkout_google_benchmark': False,

-  # By default, do not checkout JavaScript coverage node modules. These packages
+  # By default, checkout JavaScript coverage node modules. These packages
   # are used to post-process raw v8 coverage reports into IstanbulJS compliant
   # output.
-  'checkout_js_coverage_modules': False,
+  'checkout_js_coverage_modules': True,

   # Check out and download nacl by default, unless on an arm mac.
   # This can be disabled e.g. with custom_vars.
@@ -213,7 +213,7 @@ vars = {
   # luci-go CIPD package version.
   # Make sure the revision is uploaded by infra-packagers builder.
   # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
-  'luci_go': 'git_revision:cb424e70e75136736a86359ef070aa96425fe7a3',
+  'luci_go': 'git_revision:6da0608e4fa8a3c6d1fa4f855485c0038b05bf72',

   # This can be overridden, e.g. with custom_vars, to build clang from HEAD
   # instead of downloading the prebuilt pinned revision.
@@ -238,7 +238,7 @@ vars = {
   'dawn_standalone': False,

   # reclient CIPD package version
-  'reclient_version': 're_client_version:0.57.0.4865132-gomaip',
+  'reclient_version': 're_client_version:0.59.0.7914303-gomaip',

   'android_git': 'https://android.googlesource.com',
   'aomedia_git': 'https://aomedia.googlesource.com',
@@ -253,30 +253,30 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Skia
   # and whatever else without interference from each other.
-  'skia_revision': 'ef69dcf0c2dbda06cd47082f42a2433f7ba86a78',
+  'skia_revision': '3338e90707323d2cd3a150276acb9f39933deee2',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling V8
   # and whatever else without interference from each other.
-  'v8_revision': 'e92a31cfd7d06ea1b45a428bf90e1c910b3c003c',
+  'v8_revision': '87c27db79e6a35a6bdedcbfe732f978812bf6ced',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling ANGLE
   # and whatever else without interference from each other.
-  'angle_revision': 'e37380e62a427cbb7172b6c17f8752ab96abf356',
+  'angle_revision': '6661eb4900dae62cbe9af5023f9c1e7105798b50',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling SwiftShader
   # and whatever else without interference from each other.
-  'swiftshader_revision': 'd01dca1d18d03f055d0dabd99a210b5f666715ed',
+  'swiftshader_revision': '103a69bd6c82980c967c2f4002c9a302ea67c716',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling PDFium
   # and whatever else without interference from each other.
-  'pdfium_revision': 'f4c62094abe20ade3a29328bc370238b7e7812b2',
+  'pdfium_revision': '62ad9af8a9f9494645b659674b64bb51775cde05',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling BoringSSL
   # and whatever else without interference from each other.
   #
   # Note this revision should be updated with
   # third_party/boringssl/roll_boringssl.py, not roll-dep.
-  'boringssl_revision': '4d955d20d27bcf3ae71df091ad17d95229a7eb56',
+  'boringssl_revision': '27ffcc6e19bbafddf1b59ec0bc6df2904de7eb2c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling google-toolbox-for-mac
   # and whatever else without interference from each other.
@@ -284,7 +284,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling googletest
   # and whatever else without interference from each other.
-  'googletest_revision': 'b007c54f2944e193ac44fba1bc997cb65826a0b9',
+  'googletest_revision': 'af29db7ec28d6df1c7f0f745186884091e602e07',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling lighttpd
   # and whatever else without interference from each other.
@@ -296,11 +296,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling NaCl
   # and whatever else without interference from each other.
-  'nacl_revision': 'cd81f598e8de34678c9d9de21742c7e56654e8d3',
+  'nacl_revision': 'f231a6e8c08f6733c072ae9cca3ce00f42edd9ff',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling freetype
   # and whatever else without interference from each other.
-  'freetype_revision': '1e2eb65048f75c64b68708efed6ce904c31f3b2f',
+  'freetype_revision': '3100c8120e0ff423db8d8134a8073e639371993e',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling freetype
   # and whatever else without interference from each other.
@@ -308,7 +308,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling HarfBuzz
   # and whatever else without interference from each other.
-  'harfbuzz_revision': '965cf1d66589b0db60e75961cc58f5a65521078e',
+  'harfbuzz_revision': '6454cec085ba51cefcd12b1f8027bc4a647347d5',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Emoji Segmenter
   # and whatever else without interference from each other.
@@ -320,7 +320,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling catapult
   # and whatever else without interference from each other.
-  'catapult_revision': '46cc0e470b61a3ba12ae80efffa41f7c9bb87205',
+  'catapult_revision': '3cf2f4f0e03be4dc0f4a26cb3943b4a719643e1b',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libFuzzer
   # and whatever else without interference from each other.
@@ -328,7 +328,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling devtools-frontend
   # and whatever else without interference from each other.
-  'devtools_frontend_revision': 'b2a3923617328a970bf07eca721bb8f6ad9c4574',
+  'devtools_frontend_revision': '46a28a3c5dadb2a79680c35a61000e908aee74dc',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libprotobuf-mutator
   # and whatever else without interference from each other.
@@ -364,15 +364,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'shaderc_revision': '96b1dd72a827304817470274a470c4d3b2293451',
-  'dawn_revision': 'fa8cc68ff7c055512e83a538e5517400f5f053bc',
-  # Three lines of non-changing comments so that
-  # the commit queue can handle CLs rolling feed
-  # and whatever else without interference from each other.
+  'dawn_revision': '211e96c6069c66c6d503cfa2b35226c7118c7927',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'quiche_revision': '41a1bdecabbd03b35702bacb51f6b14775a07e4d',
+  'quiche_revision': '7e841d3541a113b5ed577824c9aa71b8a1c7617f',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling ios_webkit
   # and whatever else without interference from each other.
@@ -392,11 +388,11 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling libavif
   # and whatever else without interference from each other.
-  'libavif_revision': '632d13188f9b7faa40f20d870e792174b8b5b8e6',
+  'libavif_revision': 'ccf5a781238b43fee428519ba6e9508204835b9c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling nearby
   # and whatever else without interference from each other.
-  'nearby_revision': '517d77f5aa4fea8f4437125830cfc55f84705e3d',
+  'nearby_revision': '0c8838ad9b9ba5e03ea9dadd0cba5f4ea9c949fd',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling securemessage
   # and whatever else without interference from each other.
@@ -408,15 +404,15 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'resultdb_version': 'git_revision:735a8a662d3874d8b1d795a40e46ea0f57b52758',
+  'resultdb_version': 'git_revision:6cc18e2763e180929d70c786b419c1f8e6bcc66c',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'libcxxabi_revision': '93b8dcd57bd8ebe201ec24f7257339988ed2ef7c',
+  'libcxxabi_revision': 'e025ba5dc85202540099d7cd8e72eae2d4ee9e33',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
-  'libunwind_revision': 'd1c7f92b8b0bff8d9f710ca40e44563a63db376e',
+  'libunwind_revision': '1acfbbb4747081789ba48bc8c042fa3c5c8ccaa3',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling feed
   # and whatever else without interference from each other.
@@ -435,7 +431,7 @@ vars = {
   'libcxx_revision': '79a2e924d96e2fc1e4b937c42efd08898fa472d7',

   # GN CIPD package version.
-  'gn_version': 'git_revision:bd99dbf98cbdefe18a4128189665c5761263bcfb',
+  'gn_version': 'git_revision:fd9f2036f26d83f9fcfe93042fb952e5a7fe2167',
 }

 # Only these hosts are allowed for dependencies in this DEPS file.
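The pin rolls above are consumed by the deps entries below through Var(). A minimal sketch of that indirection (illustrative Python; real gclient additionally supports '{var}' string templating, custom_vars overrides, and condition evaluation):

```python
# Illustrative resolver for the DEPS pattern above; not gclient itself.
vars = {
    'chromium_git': 'https://chromium.googlesource.com',
    'skia_revision': '3338e90707323d2cd3a150276acb9f39933deee2',
}

def Var(name: str) -> str:
    """Look up a pinned value from vars, as DEPS entries do."""
    return vars[name]

# A deps entry concatenates host, repo path, and pinned revision:
dep_url = Var('chromium_git') + '/skia.git' + '@' + Var('skia_revision')
assert dep_url == ('https://chromium.googlesource.com/skia.git'
                   '@3338e90707323d2cd3a150276acb9f39933deee2')
```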
@@ -562,7 +558,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/android_webview/tools/cts_archive',
-        'version': 'ai8Ig4HlO0vG6aP_JP2uhyruE2yPzze8PFP1g8Z4_hgC',
+        'version': 'rzLrTykLB2J7ON1a9_5F7qmkjH3U246nHDPHiTruibUC',
       },
     ],
     'condition': 'checkout_android',
@@ -658,17 +654,6 @@ deps = {
     ],
   },

-  'src/tools/clang/dsymutil': {
-    'packages': [
-      {
-        'package': 'chromium/llvm-build-tools/dsymutil',
-        'version': 'M56jPzDv1620Rnm__jTMYS62Zi8rxHVq7yw0qeBFEgkC',
-      }
-    ],
-    'condition': 'checkout_mac or checkout_ios',
-    'dep_type': 'cipd',
-  },
-
   'src/chrome/test/data/autofill/captured_sites': {
     'packages': [
       {
@@ -735,16 +720,16 @@ deps = {
     Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',

   'src/docs/website': {
-    'url': Var('chromium_git') + '/website.git' + '@' + '9f939dcab5c4c8fc5be67578d2da6236349cbd49',
+    'url': Var('chromium_git') + '/website.git' + '@' + '17a7f6a95704dc84abc24ba06252d048de1f54df',
   },

   'src/ios/third_party/earl_grey2/src': {
-    'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '583953e8001af0d0242d6464b382420ccab6bb18',
+    'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '98801869816f5272ad89f7f66bec7941960a28ea',
     'condition': 'checkout_ios',
   },

   'src/ios/third_party/edo/src': {
-    'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + 'f7dea1a5bdc745493aeffece692a4883e85c0e78',
+    'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '727e556705278598fce683522beedbb9946bfda0',
     'condition': 'checkout_ios',
   },

@@ -759,7 +744,7 @@ deps = {
   },

   'src/ios/third_party/material_components_ios/src': {
-    'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '5dce9626ae787c33d7bdbda5f156c1415b986c08',
+    'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '04424d224ee9e98c7e5a31e140e31105f07e73f1',
     'condition': 'checkout_ios',
   },

@@ -840,7 +825,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/linux-amd64',
-        'version': 'l1xDoCBm1rDEFIlePkzB2hTG4r1YvYoxdNBU3SGjTDoC',
+        'version': 'YoP4kTClaepmmjRqVgIPL-uE44odWGlVM8pBRVdTx2AC',
       },
     ],
     'dep_type': 'cipd',
@@ -851,7 +836,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/mac-amd64',
-        'version': 'ScMUxoCQFi3vFXDAlBj3VezWCnqk9hxpFW8GznMw454C',
+        'version': 'mL4NyynmT1Ubjyy2JUXN4SX80VIVKV66MfgBDu-HLRAC',
       },
     ],
     'dep_type': 'cipd',
@@ -862,7 +847,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/rts/model/windows-amd64',
-        'version': 'iZunll1kgfbUFl7u6t5VnY4-MHcjb72ZS9UVDhTAr8cC',
+        'version': 'le7Fn-9wOJ6Ob24B0IvVQY_Sss-rzfQ9xaeovuM0WSUC',
       },
     ],
     'dep_type': 'cipd',
@@ -883,10 +868,6 @@ deps = {
     'dep_type': 'cipd',
   },

-  'src/third_party/shaderc/src':
-    Var('chromium_git') + '/external/github.com/google/shaderc.git@' +
-    Var('shaderc_revision'),
-
   'src/third_party/accessibility_test_framework': {
     'packages': [
       {
@@ -915,7 +896,7 @@ deps = {
   },

   'src/third_party/android_ndk': {
-    'url': Var('chromium_git') + '/android_ndk.git' + '@' + '9644104c8cf85bf1bdce5b1c0691e9778572c3f8',
+    'url': Var('chromium_git') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
     'condition': 'checkout_android_native_support',
   },

@@ -956,7 +937,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_build_tools/aapt2',
-        'version': 'wicn5Ce1ay6ivbZ1GNFF0gRSS3NYv_7hJTPtVga3O-QC',
+        'version': 'u2Cw4baoLfvlEDMwcJjq9iOJRF0_2BjsgMFl7UhJxGAC',
       },
     ],
     'condition': 'checkout_android',
@@ -967,7 +948,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_build_tools/bundletool',
-        'version': 'LoldiQDpZ0uTdAm5EPgZ8hBJ3La2KlTWLuaRxE7eDigC',
+        'version': 'zQILIUnCaQ93HTtR07m4ahlE9mrkkwks52L5vFaUaUUC',
       },
     ],
     'condition': 'checkout_android',
@@ -1022,7 +1003,7 @@ deps = {
     Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),

   'src/third_party/dav1d/libdav1d':
-    Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + 'b1a5189c9d37c837099ce50852b6ce9597b89b0c',
+    Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',

   'src/third_party/dawn':
     Var('dawn_git') + '/dawn.git' + '@' + Var('dawn_revision'),
@@ -1046,17 +1027,6 @@ deps = {
     'condition': 'checkout_src_internal and checkout_chromeos',
   },

-  'src/third_party/cast_core/prebuilts': {
-    'packages': [
-      {
-        'package': 'cast3p/cast_core/package_qa_vizio_castos_armv7a.tar',
-        'version': 'ZOzcgmEj32ltQEYvSSBfLbwcEo4MA_qtruinWrbbpGAC',
-      },
-    ],
-    'condition': 'checkout_cast3p',
-    'dep_type': 'cipd',
-  },
-
   'src/third_party/google_benchmark/src': {
     'url': Var('chromium_git') + '/external/github.com/google/benchmark.git' + '@' + 'f730846b0a3c0dc0699978846fb14ffb2fad0bdc',
     'condition': 'checkout_google_benchmark',
@@ -1077,7 +1047,7 @@ deps = {
   },

   'src/third_party/breakpad/breakpad':
-    Var('chromium_git') + '/breakpad/breakpad.git' + '@' + '08bd844599bf04c71707e8f59a8013a941264695',
+    Var('chromium_git') + '/breakpad/breakpad.git' + '@' + '8b68c72a3fff2bb687c7f411e5c1c09e356b8603',

   'src/third_party/byte_buddy': {
     'packages': [
@@ -1131,12 +1101,12 @@ deps = {
   # Tools used when building Chrome for Chrome OS. This affects both the Simple
   # Chrome workflow, as well as the chromeos-chrome ebuild.
   'src/third_party/chromite': {
-    'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '3d2387a672b437b2d32d83120080a5e593b0e5af',
+    'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'eb3547e17504b0d603b4faf2b0caebcc5d9dca93',
     'condition': 'checkout_chromeos',
   },

   'src/third_party/cld_3/src':
-    Var('chromium_git') + '/external/github.com/google/cld_3.git' + '@' + '576305689d23da93e2f5e04a3de5afbe60d339da',
+    Var('chromium_git') + '/external/github.com/google/cld_3.git' + '@' + 'b48dc46512566f5a2d41118c8c1116c4f96dc661',

   'src/third_party/colorama/src':
     Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
@@ -1146,12 +1116,12 @@ deps = {

   # For Linux and Chromium OS.
   'src/third_party/cros_system_api': {
-    'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '82881bbde1f5ec168806e3b144622aefd66bb2ae',
+    'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '90c3a2f21e7d8c6668c9f7daaaf39c5fd8ffe58a',
     'condition': 'checkout_linux',
   },

   'src/third_party/depot_tools':
-    Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '950a6b4225ed3280aa0fbca2de51bfbaecd7695a',
+    Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '932a621ece2316026926d615bb04d3006077ab79',

   'src/third_party/devtools-frontend/src':
     Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
@@ -1182,7 +1152,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/farmhash.git' + '@' + '816a4ae622e964763ca0862d9dbd19324a1eaf45',

   'src/third_party/ffmpeg':
-    Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + '574c39cce3231c69bc9a02ac475c27d944bdb113',
+    Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + 'e481fc655a6287e657a88e8c2bcd6f411d254d70',

   'src/third_party/flac':
     Var('chromium_git') + '/chromium/deps/flac.git' + '@' + 'af862024c8c8fa0ae07ced05e89013d881b00596',
@@ -1203,7 +1173,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',

   'src/third_party/grpc/src': {
-    'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + '2a0d6234cb2ccebb265c035ffd09ecc9a347b4bf',
+    'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + 'ee2b75e33740d1a88c0e2aeec1b14435e17a889e',
   },

   'src/third_party/freetype/src':
@@ -1291,7 +1261,7 @@ deps = {
     Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',

   'src/third_party/icu':
-    Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'ea8c08d8783fceda86c19618694881149e23f305',
+    Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'e1f2f4f42368555835a7a0894188716556c32871',

   'src/third_party/icu4j': {
     'packages': [
@@ -1362,7 +1332,7 @@ deps = {
     Var('chromium_git') + '/external/libaddressinput.git' + '@' + '3b8ee157a8f3536bbf5ad2448e9e3370463c1e40',

   'src/third_party/libaom/source/libaom':
-    Var('aomedia_git') + '/aom.git' + '@' + 'ee1ed1ccf2b9ecedd6aee438eafc7cc61c23342d',
+    Var('aomedia_git') + '/aom.git' + '@' + 'e24a83a72b507b93a94f299f0eead1213dbac214',

   'src/third_party/libavif/src':
     Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@@ -1395,7 +1365,7 @@ deps = {
     Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git' + '@' + '22f1a22c99e9dde8cd3c72ead333f425c5a7aa77',

   'src/third_party/liblouis/src': {
-    'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + '10f66b2735b7d1e3e44d98f6dedbb859d197ea57',
+    'url': Var('chromium_git') + '/external/liblouis-github.git' + '@' + 'c05f3bfb0990434bd12bf6697d16ed943f2203c2',
     'condition': 'checkout_linux',
   },

@@ -1420,13 +1390,13 @@ deps = {
   },

   'src/third_party/libvpx/source/libvpx':
-    Var('chromium_git') + '/webm/libvpx.git' + '@' + 'df0d06de6d3b64e35b9e75ad72c571af061bc7b3',
+    Var('chromium_git') + '/webm/libvpx.git' + '@' + 'bf672f23a5336cb54dbcb2e4417142139f44cc3e',

   'src/third_party/libwebm/source':
     Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',

   'src/third_party/libyuv':
-    Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '3aebf69d668177e7ee6dbbe0025e5c3dbb525ff2',
+    Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '18f91105162a6ebe7a46ee1c81e9ab67ca97a02b',

   'src/third_party/lighttpd': {
     'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@@ -1517,7 +1487,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'b52786888ddce9d6bc06b7825ba9bffc65924e0c',

   'src/third_party/openscreen/src':
-    Var('chromium_git') + '/openscreen' + '@' + 'adafe0ae3ed0032a6f96db719f26c511d09f1518',
+    Var('chromium_git') + '/openscreen' + '@' + 'ee7d4e8c5eb35509288a8f238bbf8ef9c3cb9d35',

   'src/third_party/openxr/src': {
     'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@@ -1534,7 +1504,7 @@ deps = {
   },

   'src/third_party/perfetto':
-    Var('android_git') + '/platform/external/perfetto.git' + '@' + '943905de31ebf8d6a35016a170457dbae3efbecc',
+    Var('android_git') + '/platform/external/perfetto.git' + '@' + '4c15672c0a9e16ac762aa5148f1264350fd49b98',

   'src/third_party/perl': {
     'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@@ -1559,7 +1529,7 @@ deps = {
   },

   'src/third_party/quic_trace/src':
-    Var('chromium_git') + '/external/github.com/google/quic-trace.git' + '@' + '413c3a4a641c014193eabb8d282c2348ccec3c5b',
+    Var('chromium_git') + '/external/github.com/google/quic-trace.git' + '@' + 'c7b993eb750e60c307e82f75763600d9c06a6de1',

   'src/third_party/pywebsocket3/src':
     Var('chromium_git') + '/external/github.com/GoogleChromeLabs/pywebsocket3.git' + '@' + '50602a14f1b6da17e0b619833a13addc6ea78bc2',
@@ -1612,7 +1582,7 @@ deps = {
     'packages': [
       {
         'package': 'fuchsia/third_party/android/aemu/release/linux-amd64',
-        'version': 'jbMpjSzTRngktfwC6FszY0ASXYXkR77i2_dDrvQsa9oC'
+        'version': 'lbYV0rO8V4GxeqmRrKZeRgQmbFxh2BwafFgd9cjYmWYC'
       },
     ],
     'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -1631,7 +1601,7 @@ deps = {
   },

   'src/third_party/re2/src':
-    Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '3be7d1b6b486ecd47b0daa58210051e29fb31122',
+    Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'f5c782e5d02b3e7f244274c9b6d9d3d7a9b6e737',

   'src/third_party/r8': {
     'packages': [
@@ -1673,7 +1643,7 @@ deps = {
     Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',

   'src/third_party/sqlite/src':
-    Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '803a31044a01ca3984f7c321c8821974e4100d07',
+    Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'a54d5d154f4b349705a67107ed190d1943f94646',

   'src/third_party/sqlite4java': {
     'packages': [
@@ -1686,22 +1656,31 @@ deps = {
     'dep_type': 'cipd',
   },

+  'src/third_party/swift-format': {
+    'packages': [
+      {
+        'package': 'infra/3pp/tools/swift-format/${{platform}}',
+        'version': 'version:2@505.chromium.1',
+      },
+    ],
+    'condition': 'host_os == mac',
+    'dep_type': 'cipd',
+  },
+
   'src/third_party/swiftshader':
     Var('swiftshader_git') + '/SwiftShader.git' + '@' + Var('swiftshader_revision'),

-  'src/third_party/text-fragments-polyfill/src': {
-    'url': Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + '5959682856f165b1dc7ddeecdc9d890bfaeb6449',
-    'condition': 'checkout_ios',
-  },
+  'src/third_party/text-fragments-polyfill/src':
+    Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + '428dd13167f3ce02e3ca7c086d291d7c079da0dc',

   'src/third_party/tflite/src':
-    Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '9768822159624e54ade255b55b8e6a6c7ec5a962',
+    Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '551a50e768fc48db8e68356a575763278fa1b3b6',

   'src/third_party/turbine': {
     'packages': [
       {
         'package': 'chromium/third_party/turbine',
-        'version': 'Go9J3Mz5ankZAgxmn5GxeXKdEDV73zaZp2ojNfGC1RQC',
+        'version': 'y4x80kUnDOxC5QyG48MlVoiRIEn09eaHcIJQFavlqgMC',
       },
     ],
     'condition': 'checkout_android',
@@ -1713,10 +1692,7 @@ deps = {
     'condition': 'checkout_android',
   },

-  'src/third_party/usrsctp/usrsctplib':
-    Var('chromium_git') + '/external/github.com/sctplab/usrsctp' + '@' + '62d7d0c928c9a040dce96aa2f16c00e7e67d59cb',
-
-  'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@5fe1b21d6ba47a8dee59068d6927a4c157ec13a3',
+  'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@65b7b8de281ca44627456ade69c7cba884bd1c87',

   'src/third_party/vulkan_memory_allocator':
     Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@@ -1729,7 +1705,7 @@ deps = {

   # Wayland protocols that add functionality not available in the core protocol.
   'src/third_party/wayland-protocols/src': {
-    'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + '177ff9119da526462e5d35fbfde6c84794913787',
+    'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + 'd324986823519c15b2162fc3e0a720f349e43b0c',
     'condition': 'checkout_linux',
   },

@@ -1752,10 +1728,10 @@ deps = {
     Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'cf04aebdf9b53bb2853f22a81465688daf879ec6',

   'src/third_party/webgpu-cts/src':
-    Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'a630866d89f74aa95cf3aecd78987637ee195b68',
+    Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '958d732db02c2a70bcf4a2b0986f09318db4adfb',

   'src/third_party/webrtc':
-    Var('webrtc_git') + '/src.git' + '@' + '34e12465a46da3c220eab0489881cd89a4d659d2',
+    Var('webrtc_git') + '/src.git' + '@' + '6ff73180ad01aca444c9856f91148eb2b948ce63',

   'src/third_party/libgifcodec':
     Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
@@ -1782,7 +1758,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/linux-amd64',
-        'version': 'wjKDZ5vJELJ_j3O037nIWhBEMF0cY4Y1g4tLc47hPJoC',
+        'version': 'iqtz2prn9CUv6A8KCcxJzadmPEDLY1FPP-b2YqIFQ1cC',
       },
     ],
     'dep_type': 'cipd',
@@ -1792,7 +1768,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/windows-amd64',
-        'version': 'Vg04A_bOadtB2ljbA9DGKe69_Uc6pmX5mk_ABoO2R3EC',
+        'version': 'EbVQXa1u0hbZ8pxb0Il6Rbc1ErHpIN_-kMVOzBXMQyoC',
       },
     ],
     'dep_type': 'cipd',
@@ -1803,7 +1779,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/mac-amd64',
-        'version': 'jw5QK1qcTGSBr-yjH0d-9F_MNeq6e5_5aWLq_oGWy0QC',
+        'version': 'Y4l14LBqCsT9EhffPIOtso9VSpwqQE9WccjZdDBZmLoC',
       },
     ],
     'dep_type': 'cipd',
@@ -1814,7 +1790,7 @@ deps = {
     'packages': [
       {
         'package': 'skia/tools/goldctl/mac-arm64',
-        'version': 'o4BSMT1hKtY4T4VBfANeSm-NuhxoxPYUp3lF0EpoUvMC',
+        'version': 'INEqc8JI_mtww_X0ShOlDkF3S8OG4tjF4Nkei0K7ci8C',
       },
     ],
     'dep_type': 'cipd',
@@ -1825,7 +1801,7 @@ deps = {
     Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),

   'src-internal': {
-    'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@730d978fb36fbd1d7658f3b8c337a89bfe704cf9',
+    'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@ae99ebde2a4de4d30a66c278c396dba703d2845f',
     'condition': 'checkout_src_internal',
   },

@@ -1833,7 +1809,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/assistant/ambient',
-        'version': 'version:float_on_by_initial_version',
+        'version': 'version:float_on_by_slower',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1844,7 +1820,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/eche_app/app',
-        'version': 'ZpuWWOZwhn4j7SwQMvPc-NJOqV4p32nHSxVOFDH3a3MC',
+        'version': 'mO7HIkligmD70YaR0NC-cEilQ0xhQYkaBq-8xFFsHAMC',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1855,7 +1831,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/help_app/app',
-        'version': '_fsfATiokrgU562IXkEUlu49ttGo2GP6B_3KMV4YGj8C',
+        'version': 'XqfD0KEkd76pT0UI2DhgmgL-CfLXmHeGmHqZ8eSBu-gC',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1866,7 +1842,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/media_app/app',
-        'version': '74VBtd9mFKuJjJ4wsplDS7Av21ogn2oCHw3eTIqwGnwC',
+        'version': 'u3LIEepgYh4Lf4_iS5eKnL1K17o6cTHozTZQ6gkS3oUC',
      },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -1877,7 +1853,7 @@ deps = {
     'packages': [
       {
         'package': 'chromeos_internal/apps/projector_app/app',
-        'version': 'cvaInkU-R_Gp-n2T4FBD02VwkF3VjZEuBh5sAFLoCf8C',
+        'version': 'zvssMRGkrSKVTUnN3dSa_d3_xPuVJ2aPSmegU-rHbMQC',
       },
     ],
     'condition': 'checkout_chromeos and checkout_src_internal',
@@ -3263,7 +3239,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
-        'version': 'version:2@1.6.10.cr1',
+        'version': 'version:2@1.6.20.cr1',
       },
     ],
     'condition': 'checkout_android',
@@ -3274,7 +3250,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
-        'version': 'version:2@1.6.10.cr1',
+        'version': 'version:2@1.6.20.cr1',
       },
     ],
     'condition': 'checkout_android',
@@ -3285,7 +3261,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7',
-        'version': 'version:2@1.5.0.cr1',
+        'version': 'version:2@1.6.20.cr1',
       },
     ],
     'condition': 'checkout_android',
@@ -3296,7 +3272,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8',
-        'version': 'version:2@1.5.0.cr1',
+        'version': 'version:2@1.6.20.cr1',
       },
     ],
     'condition': 'checkout_android',
@@ -3307,7 +3283,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_android',
-        'version': 'version:2@1.5.0.cr1',
+        'version': 'version:2@1.6.1.cr1',
       },
     ],
     'condition': 'checkout_android',
@@ -3318,7 +3294,7 @@ deps = {
     'packages': [
       {
         'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_core_jvm',
-        'version': 'version:2@1.5.0.cr1',
+        'version': 'version:2@1.6.1.cr1',
       },
     ],
     'condition': 'checkout_android',
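The Kotlin and coroutines pins above use CIPD's tag syntax: a ref like 'version:2@1.6.20.cr1' names an exact package instance, with '2@' marking the tag-format epoch and '.cr1' the first Chromium repackaging of that upstream release. A small illustrative parser for the convention (the field names are mine, not a CIPD API):

```python
def parse_cipd_tag(tag: str) -> dict:
    """Split a CIPD version tag like 'version:2@1.6.20.cr1' into its parts.
    Illustrative only; the naming here is not a CIPD API."""
    key, _, value = tag.partition(':')            # 'version', '2@1.6.20.cr1'
    epoch, _, rest = value.partition('@')         # tag-format epoch, payload
    upstream, _, repackage = rest.rpartition('.') # '1.6.20', 'cr1'
    return {'key': key, 'epoch': epoch,
            'upstream_version': upstream, 'chromium_rev': repackage}

assert parse_cipd_tag('version:2@1.6.20.cr1') == {
    'key': 'version', 'epoch': '2',
    'upstream_version': '1.6.20', 'chromium_rev': 'cr1'}
```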
@@ -3890,6 +3866,34 @@ hooks = [
                 '-s', 'src/third_party/skia',
                 '--header', 'src/skia/ext/skia_commit_hash.h'],
   },
+  # Pull dsymutil binaries using checked-in hashes.
+  {
+    'name': 'dsymutil_mac_arm64',
+    'pattern': '.',
+    'condition': 'host_os == "mac" and host_cpu == "arm64"',
+    'action': [ 'python3',
+                'src/third_party/depot_tools/download_from_google_storage.py',
+                '--no_resume',
+                '--no_auth',
+                '--bucket', 'chromium-browser-clang',
+                '-s', 'src/tools/clang/dsymutil/bin/dsymutil.arm64.sha1',
+                '-o', 'src/tools/clang/dsymutil/bin/dsymutil',
+    ],
+  },
+  {
+    'name': 'dsymutil_mac_x64',
+    'pattern': '.',
+    'condition': 'host_os == "mac" and host_cpu == "x64"',
+    'action': [ 'python3',
+                'src/third_party/depot_tools/download_from_google_storage.py',
+                '--no_resume',
+                '--no_auth',
+                '--bucket', 'chromium-browser-clang',
+                '-s', 'src/tools/clang/dsymutil/bin/dsymutil.x64.sha1',
+                '-o', 'src/tools/clang/dsymutil/bin/dsymutil',
+    ],
+  },

   # Pull clang-format binaries using checked-in hashes.
   {
     'name': 'clang_format_win',
@@ -4067,10 +4071,14 @@ hooks = [
                 '-s', 'src/third_party/node/linux/node-linux-x64.tar.gz.sha1',
     ],
   },
+  # The Mac x64/arm64 binaries are downloaded regardless of host architecture
+  # since it's possible to cross-compile for the other architecture. This can
+  # cause problems for tests that use node if the test device architecture does
+  # not match the architecture of the compile machine.
   {
     'name': 'node_mac',
     'pattern': '.',
-    'condition': 'host_os == "mac" and host_cpu == "x64"',
+    'condition': 'host_os == "mac"',
     'action': [ 'python3',
                 'src/third_party/depot_tools/download_from_google_storage.py',
                 '--no_resume',
@@ -4083,7 +4091,7 @@ hooks = [
   {
     'name': 'node_mac_arm64',
     'pattern': '.',
-    'condition': 'host_os == "mac" and host_cpu == "arm64"',
+    'condition': 'host_os == "mac"',
     'action': [ 'python3',
                 'src/third_party/depot_tools/download_from_google_storage.py',
                 '--no_resume',
@@ -4618,13 +4626,13 @@ hooks = [
     ],
   },

-  # Download Cast Web Runtime
+  # Download Cast3p Binaries
   {
-    'name': 'cast_web_runtime',
+    'name': 'cast3p_binaries',
     'pattern': '.',
     'action': [
       'python3',
-      'src/tools/cast3p/update_runtime.py',
+      'src/tools/cast3p/update_binaries.py',
     ],
     'condition': 'checkout_cast3p',
   },
@@ -4648,7 +4656,7 @@
 recursedeps = [
   # ANGLE manages DEPS that it also owns the build files for, such as dEQP.
   'src/third_party/angle',
-  # Dawn and Tint's revision are linked
+  # Dawn needs CTS from its DEPS
   'src/third_party/dawn',
   'src/third_party/openscreen/src',
   'src/third_party/vulkan-deps',
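Each hook above is a condition-gated command list that gclient runs after a sync; the download_from_google_storage hooks fetch a binary whose SHA-1 is checked into the tree (the .sha1 files passed via -s). A minimal sketch of the dispatch flow (illustrative; real gclient parses a restricted condition grammar rather than using eval, and handles auth and retries):

```python
import subprocess

def run_hooks(hooks: list[dict], env: dict) -> None:
    """Run gclient-style hooks: skip any whose 'condition' is false,
    then execute 'action' as a subprocess. Illustrative sketch only."""
    for hook in hooks:
        condition = hook.get('condition')
        # Real gclient evaluates a restricted expression grammar; eval()
        # here is only for illustration.
        if condition and not eval(condition, {}, env):
            continue
        subprocess.check_call(hook['action'])

hooks = [{
    'name': 'dsymutil_mac_arm64',
    'condition': 'host_os == "mac" and host_cpu == "arm64"',
    'action': ['python3',
               'src/third_party/depot_tools/download_from_google_storage.py',
               '--no_resume', '--no_auth',
               '--bucket', 'chromium-browser-clang',
               '-s', 'src/tools/clang/dsymutil/bin/dsymutil.arm64.sha1',
               '-o', 'src/tools/clang/dsymutil/bin/dsymutil'],
}]
run_hooks(hooks, {'host_os': 'linux', 'host_cpu': 'x64'})  # hook is skipped
```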
@@ -36,7 +36,6 @@ import("//build/config/ui.gni")
 import("//build/rust/mixed_component.gni")
-import("//build/timestamp.gni")
 import("//build_overrides/build.gni")
 import("//third_party/icu/config.gni")

 if (is_mac) {
   # Used to generate fuzzer corpus :base_mach_port_rendezvous_convert_corpus.
@@ -115,22 +114,6 @@ config("todo_buildflag_build_rust_json_parser") {
   ]
 }

-config("memory_tagging") {
-  if (current_cpu == "arm64" && is_clang &&
-      (is_linux || is_chromeos || is_android || is_fuchsia)) {
-    # base/ has access to the MTE intrinsics because it needs to use them,
-    # but they're not backwards compatible. Use base::CPU::has_mte()
-    # beforehand to confirm or use indirect functions (ifuncs) to select
-    # an MTE-specific implementation at dynamic link-time.
-    cflags = [
-      "-Xclang",
-      "-target-feature",
-      "-Xclang",
-      "+mte",
-    ]
-  }
-}
-
 config("base_implementation") {
   defines = [ "BASE_IMPLEMENTATION" ]
   configs = [ "//build/config/compiler:wexit_time_destructors" ]
@@ -334,7 +317,6 @@ mixed_component("base") {
     "files/file_error_or.h",
     "files/file_path.cc",
     "files/file_path.h",
-    "files/file_path_constants.cc",
     "files/file_path_watcher.cc",
     "files/file_path_watcher.h",
     "files/file_proxy.cc",
@@ -411,6 +393,10 @@ mixed_component("base") {
     "memory/nonscannable_memory.cc",
     "memory/nonscannable_memory.h",
     "memory/page_size.h",
+    "memory/platform_shared_memory_handle.cc",
+    "memory/platform_shared_memory_handle.h",
+    "memory/platform_shared_memory_mapper.cc",
+    "memory/platform_shared_memory_mapper.h",
     "memory/platform_shared_memory_region.cc",
     "memory/platform_shared_memory_region.h",
     "memory/ptr_util.h",
@@ -493,6 +479,8 @@ mixed_component("base") {
     "metrics/persistent_memory_allocator.h",
     "metrics/persistent_sample_map.cc",
     "metrics/persistent_sample_map.h",
+    "metrics/ranges_manager.cc",
+    "metrics/ranges_manager.h",
     "metrics/record_histogram_checker.h",
     "metrics/sample_map.cc",
     "metrics/sample_map.h",
@@ -1093,6 +1081,8 @@ mixed_component("base") {
     "win/core_winrt_util.cc",
     "win/core_winrt_util.h",
     "win/current_module.h",
+    "win/default_apps_util.cc",
+    "win/default_apps_util.h",
     "win/embedded_i18n/language_selector.cc",
     "win/embedded_i18n/language_selector.h",
     "win/enum_variant.cc",
@@ -1346,8 +1336,6 @@ mixed_component("base") {
       "debug/proc_maps_linux.cc",
      "debug/proc_maps_linux.h",
       "files/dir_reader_linux.h",
-      "files/file_path_watcher_linux.cc",
-      "files/file_path_watcher_linux.h",
       "files/file_util_linux.cc",
       "files/scoped_file_linux.cc",
       "process/internal_linux.cc",
@@ -1361,6 +1349,13 @@ mixed_component("base") {
     ]
   }

+  if (is_linux || is_chromeos || is_android || is_fuchsia) {
+    sources += [
+      "files/file_path_watcher_inotify.cc",
+      "files/file_path_watcher_inotify.h",
+    ]
+  }
+
   if (!is_nacl) {
     sources += [
       "base_paths.cc",
@@ -1414,7 +1409,6 @@ mixed_component("base") {

   configs += [
     ":base_implementation",
-    ":memory_tagging",
     "//build/config:precompiled_headers",
     "//build/config/compiler:wglobal_constructors",
   ]
@@ -1498,10 +1492,14 @@ mixed_component("base") {
     sources += [
       "allocator/allocator_shim.cc",
       "allocator/allocator_shim.h",
-      "allocator/allocator_shim_default_dispatch_to_partition_alloc.cc",
-      "allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
       "allocator/allocator_shim_internals.h",
     ]
+    if (use_partition_alloc) {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_partition_alloc.cc",
+        "allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
+      ]
+    }
     if (is_android) {
       sources += [
         "allocator/allocator_shim_override_cpp_symbols.h",
@@ -1607,6 +1605,7 @@ mixed_component("base") {
       "android/scoped_java_ref.cc",
       "android/scoped_java_ref.h",
       "android/sys_utils.h",
+      "memory/platform_shared_memory_mapper_android.cc",
       "memory/platform_shared_memory_region_android.cc",
       "system/sys_info_android.cc",
@@ -1615,8 +1614,6 @@ mixed_component("base") {
       "debug/elf_reader.h",
       "debug/proc_maps_linux.cc",
       "debug/proc_maps_linux.h",
-      "files/file_path_watcher_linux.cc",
-      "files/file_path_watcher_linux.h",
       "process/internal_linux.cc",
       "process/internal_linux.h",
       "process/memory_linux.cc",
@@ -1688,7 +1685,6 @@ mixed_component("base") {
       "files/file_descriptor_watcher_posix.cc",
       "files/file_descriptor_watcher_posix.h",
       "files/file_enumerator_posix.cc",
-      "files/file_path_watcher_fuchsia.cc",
       "files/file_posix.cc",
       "files/file_util_fuchsia.cc",
       "files/file_util_posix.cc",
@@ -1723,6 +1719,7 @@ mixed_component("base") {
       "fuchsia/startup_context.cc",
       "fuchsia/startup_context.h",
       "memory/page_size_posix.cc",
+      "memory/platform_shared_memory_mapper_fuchsia.cc",
       "memory/platform_shared_memory_region_fuchsia.cc",
       "message_loop/message_pump_fuchsia.cc",
       "message_loop/message_pump_fuchsia.h",
@@ -1839,6 +1836,7 @@ mixed_component("base") {
       "files/file_path_watcher_stub.cc",
       "memory/page_size_nacl.cc",
       "process/process_stubs.cc",
+      "profiler/stack_sampler_posix.cc",
       "sync_socket_nacl.cc",
       "threading/platform_thread_linux.cc",
     ]
@@ -1919,124 +1917,16 @@ mixed_component("base") {
       "allocator/partition_alloc_features.h",
       "allocator/partition_alloc_support.cc",
       "allocator/partition_alloc_support.h",
-      "allocator/partition_allocator/address_pool_manager.cc",
-      "allocator/partition_allocator/address_pool_manager.h",
-      "allocator/partition_allocator/address_pool_manager_bitmap.cc",
-      "allocator/partition_allocator/address_pool_manager_bitmap.h",
-      "allocator/partition_allocator/address_pool_manager_types.h",
-      "allocator/partition_allocator/address_space_randomization.cc",
-      "allocator/partition_allocator/address_space_randomization.h",
-      "allocator/partition_allocator/address_space_stats.h",
-      "allocator/partition_allocator/allocation_guard.cc",
-      "allocator/partition_allocator/allocation_guard.h",
-      "allocator/partition_allocator/dangling_raw_ptr_checks.cc",
-      "allocator/partition_allocator/dangling_raw_ptr_checks.h",
-      "allocator/partition_allocator/extended_api.cc",
-      "allocator/partition_allocator/extended_api.h",
-      "allocator/partition_allocator/memory_reclaimer.cc",
-      "allocator/partition_allocator/memory_reclaimer.h",
-      "allocator/partition_allocator/oom.cc",
-      "allocator/partition_allocator/oom.h",
-      "allocator/partition_allocator/oom_callback.cc",
-      "allocator/partition_allocator/oom_callback.h",
-      "allocator/partition_allocator/page_allocator.cc",
-      "allocator/partition_allocator/page_allocator.h",
-      "allocator/partition_allocator/page_allocator_constants.h",
-      "allocator/partition_allocator/page_allocator_internal.h",
-      "allocator/partition_allocator/partition_address_space.cc",
-      "allocator/partition_allocator/partition_address_space.h",
-      "allocator/partition_allocator/partition_alloc-inl.h",
-      "allocator/partition_allocator/partition_alloc.cc",
-      "allocator/partition_allocator/partition_alloc.h",
-      "allocator/partition_allocator/partition_alloc_check.h",
-      "allocator/partition_allocator/partition_alloc_config.h",
-      "allocator/partition_allocator/partition_alloc_constants.h",
-      "allocator/partition_allocator/partition_alloc_forward.h",
-      "allocator/partition_allocator/partition_alloc_hooks.cc",
-      "allocator/partition_allocator/partition_alloc_hooks.h",
-      "allocator/partition_allocator/partition_alloc_notreached.h",
-      "allocator/partition_allocator/partition_bucket.cc",
-      "allocator/partition_allocator/partition_bucket.h",
-      "allocator/partition_allocator/partition_bucket_lookup.h",
-      "allocator/partition_allocator/partition_cookie.h",
-      "allocator/partition_allocator/partition_direct_map_extent.h",
-      "allocator/partition_allocator/partition_freelist_entry.h",
-      "allocator/partition_allocator/partition_lock.h",
-      "allocator/partition_allocator/partition_oom.cc",
-      "allocator/partition_allocator/partition_oom.h",
-      "allocator/partition_allocator/partition_page.cc",
-      "allocator/partition_allocator/partition_page.h",
-      "allocator/partition_allocator/partition_ref_count.h",
-      "allocator/partition_allocator/partition_root.cc",
-      "allocator/partition_allocator/partition_root.h",
-      "allocator/partition_allocator/partition_stats.cc",
-      "allocator/partition_allocator/partition_stats.h",
-      "allocator/partition_allocator/partition_tls.h",
-      "allocator/partition_allocator/random.cc",
-      "allocator/partition_allocator/random.h",
-      "allocator/partition_allocator/reservation_offset_table.cc",
-      "allocator/partition_allocator/reservation_offset_table.h",
-      "allocator/partition_allocator/spinning_mutex.cc",
-      "allocator/partition_allocator/spinning_mutex.h",
-      "allocator/partition_allocator/starscan/logging.h",
-      "allocator/partition_allocator/starscan/metadata_allocator.cc",
-      "allocator/partition_allocator/starscan/metadata_allocator.h",
-      "allocator/partition_allocator/starscan/pcscan.cc",
-      "allocator/partition_allocator/starscan/pcscan.h",
-      "allocator/partition_allocator/starscan/pcscan_internal.cc",
-      "allocator/partition_allocator/starscan/pcscan_internal.h",
-      "allocator/partition_allocator/starscan/pcscan_scheduling.cc",
-      "allocator/partition_allocator/starscan/pcscan_scheduling.h",
-      "allocator/partition_allocator/starscan/raceful_worklist.h",
-      "allocator/partition_allocator/starscan/scan_loop.h",
-      "allocator/partition_allocator/starscan/snapshot.cc",
-      "allocator/partition_allocator/starscan/snapshot.h",
-      "allocator/partition_allocator/starscan/stack/stack.cc",
-      "allocator/partition_allocator/starscan/stack/stack.h",
-      "allocator/partition_allocator/starscan/starscan_fwd.h",
-      "allocator/partition_allocator/starscan/state_bitmap.h",
-      "allocator/partition_allocator/starscan/stats_collector.cc",
-      "allocator/partition_allocator/starscan/stats_collector.h",
-      "allocator/partition_allocator/starscan/stats_reporter.h",
-      "allocator/partition_allocator/starscan/write_protector.cc",
-      "allocator/partition_allocator/starscan/write_protector.h",
-      "allocator/partition_allocator/tagging.cc",
-      "allocator/partition_allocator/tagging.h",
-      "allocator/partition_allocator/thread_cache.cc",
-      "allocator/partition_allocator/thread_cache.h",
-      "allocator/partition_allocator/yield_processor.h",
     ]
-    if (is_win) {
-      sources += [
-        "allocator/partition_allocator/page_allocator_internals_win.h",
-        "allocator/partition_allocator/partition_tls_win.cc",
-      ]
-    } else if (is_posix) {
-      sources += [
-        "allocator/partition_allocator/page_allocator_internals_posix.cc",
-        "allocator/partition_allocator/page_allocator_internals_posix.h",
-      ]
-    } else if (is_fuchsia) {
-      sources += [
-        "allocator/partition_allocator/page_allocator_internals_fuchsia.h",
-      ]
-    }
-
-    if (current_cpu == "x64") {
-      defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
-      sources += [ "allocator/partition_allocator/starscan/stack/asm/x64/push_registers_asm.cc" ]
-    } else if (current_cpu == "x86") {
-      defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
-      sources += [ "allocator/partition_allocator/starscan/stack/asm/x86/push_registers_asm.cc" ]
-    } else if (current_cpu == "arm") {
-      defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
-      sources += [ "allocator/partition_allocator/starscan/stack/asm/arm/push_registers_asm.cc" ]
-    } else if (current_cpu == "arm64") {
-      defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
-      sources += [ "allocator/partition_allocator/starscan/stack/asm/arm64/push_registers_asm.cc" ]
-    } else {
-      # To support a trampoline for another arch, please refer to v8/src/heap/base.
-    }
+    # Need this to pass gn check, because gn check doesn't see
+    # BUILDFLAG(USE_PARTITION_ALLOC). A linker will remove all
+    # partition_alloc code if use_partition_alloc = false because no code uses
+    # partition_alloc.
+    public_deps += [ "allocator/partition_allocator:partition_alloc" ]
+    if (current_os == "openwrt") {
+      public_deps -= [ "allocator/partition_allocator:partition_alloc" ]
+    }
   }
@@ -2044,6 +1934,7 @@ mixed_component("base") {
   if (is_win) {
     sources += [
       "files/file_enumerator_win.cc",
+      "memory/platform_shared_memory_mapper_win.cc",
      "memory/platform_shared_memory_region_win.cc",
       "power_monitor/power_monitor_device_source_win.cc",
       "power_monitor/speed_limit_observer_win.cc",
@@ -2098,6 +1989,7 @@ mixed_component("base") {
       "files/file_path_watcher_kqueue.cc",
       "files/file_path_watcher_kqueue.h",
       "mac/scoped_typeref.h",
+      "memory/platform_shared_memory_mapper_mac.cc",
       "memory/platform_shared_memory_region_mac.cc",
       "message_loop/message_pump_kqueue.cc",
       "message_loop/message_pump_kqueue.h",
@@ -2284,7 +2176,10 @@ mixed_component("base") {
   # Android and MacOS have their own custom shared memory handle
   # implementations. e.g. due to supporting both POSIX and native handles.
   if (is_posix && !is_android && !is_mac) {
-    sources += [ "memory/platform_shared_memory_region_posix.cc" ]
+    sources += [
+      "memory/platform_shared_memory_mapper_posix.cc",
+      "memory/platform_shared_memory_region_posix.cc",
+    ]
   }

   if (is_posix && !is_apple) {
@@ -2602,7 +2497,11 @@ buildflag_header("profiler_buildflags") {
   header = "profiler_buildflags.h"
   header_dir = "base/profiler"

-  flags = [ "ENABLE_ARM_CFI_TABLE=$enable_arm_cfi_table" ]
+  flags = [
+    "ENABLE_ARM_CFI_TABLE=$enable_arm_cfi_table",
+    "IOS_STACK_PROFILER_ENABLED=$ios_stack_profiler_enabled",
+    "USE_ANDROID_UNWINDER_V2=$use_android_unwinder_v2",
+  ]
 }

 # This is the subset of files from base that should not be used with a dynamic
@ -7,7 +7,6 @@ danakj@chromium.org
|
||||
dcheng@chromium.org
|
||||
fdoray@chromium.org
|
||||
gab@chromium.org
|
||||
jdoerrie@chromium.org
|
||||
kylechar@chromium.org
|
||||
mark@chromium.org
|
||||
thakis@chromium.org
|
||||
|
@@ -22,6 +22,10 @@ buildflag_header("buildflags") {
  _enable_dangling_raw_ptr_checks =
      enable_dangling_raw_ptr_checks && _use_backup_ref_ptr

  # MTECheckedPtr is exclusive against BRP (asserted at declaration).
  # MTECheckedPtr requires 64-bit pointers (not available in NaCl).
  _use_mte_checked_ptr = use_mte_checked_ptr && !is_nacl

  _record_alloc_info = false

  flags = [
@@ -35,6 +39,9 @@ buildflag_header("buildflags") {
    "ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
    "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",

    # Not to be used directly - see `partition_alloc_config.h`.
    "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",

    "USE_FAKE_BINARY_EXPERIMENT=$use_fake_binary_experiment",

    "RECORD_ALLOC_INFO=$_record_alloc_info",

@@ -87,8 +87,13 @@ declare_args() {
  # Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
  # of raw_ptr<T>, and enable PartitionAlloc support for it.
  use_backup_ref_ptr = _is_brp_supported

  use_mte_checked_ptr = false
}

assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
       "MTECheckedPtr conflicts with BRP.")

declare_args() {
  # If BRP is enabled, additional options are available:
  # - put_ref_count_in_previous_slot: place the ref-count at the end of the
@@ -98,7 +103,7 @@ declare_args() {
  #   are too expensive to have on by default.
  # - enable_dangling_raw_ptr_checks: enable checking raw_ptr do not become
  #   dangling during their lifetime.
  put_ref_count_in_previous_slot = false
  put_ref_count_in_previous_slot = use_backup_ref_ptr
  enable_backup_ref_ptr_slow_checks = false
  enable_dangling_raw_ptr_checks = false

@@ -20,14 +20,16 @@
#include "base/bits.h"
#include "base/logging.h"

namespace base {
namespace partition_alloc {

// Defined in base/allocator/partition_allocator/partition_root.cc
void PartitionAllocMallocHookOnBeforeForkInParent();
void PartitionAllocMallocHookOnAfterForkInParent();
void PartitionAllocMallocHookOnAfterForkInChild();

namespace allocator {
} // namespace partition_alloc

namespace base::allocator {

namespace {

@@ -66,12 +68,12 @@ void MallocIntrospectionLog(malloc_zone_t* zone, void* address) {

void MallocIntrospectionForceLock(malloc_zone_t* zone) {
  // Called before fork(2) to acquire the lock.
  PartitionAllocMallocHookOnBeforeForkInParent();
  partition_alloc::PartitionAllocMallocHookOnBeforeForkInParent();
}

void MallocIntrospectionForceUnlock(malloc_zone_t* zone) {
  // Called in the parent process after fork(2) to release the lock.
  PartitionAllocMallocHookOnAfterForkInParent();
  partition_alloc::PartitionAllocMallocHookOnAfterForkInParent();
}

void MallocIntrospectionStatistics(malloc_zone_t* zone,
@@ -111,7 +113,7 @@ void MallocIntrospectionEnumerateDischargedPointers(

void MallocIntrospectionReinitLock(malloc_zone_t* zone) {
  // Called in a child process after fork(2) to re-initialize the lock.
  PartitionAllocMallocHookOnAfterForkInChild();
  partition_alloc::PartitionAllocMallocHookOnAfterForkInChild();
}

void MallocIntrospectionPrintTask(task_t task,
@@ -372,5 +374,4 @@ InitializeDefaultMallocZoneWithPartitionAlloc() {

} // namespace

} // namespace allocator
} // namespace base
} // namespace base::allocator

@@ -4,6 +4,7 @@

#include "base/allocator/partition_alloc_features.h"

#include "base/base_export.h"
#include "base/feature_list.h"
#include "build/build_config.h"

@@ -79,7 +79,7 @@ constexpr const char* MutatorIdToTracingString(
}

// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public StatsReporter {
class StatsReporterImpl final : public partition_alloc::StatsReporter {
 public:
  void ReportTraceEvent(internal::StatsCollector::ScannerId id,
                        [[maybe_unused]] const PlatformThreadId tid,
@@ -147,8 +147,10 @@ void RegisterPCScanStatsReporter() {
namespace {

void RunThreadCachePeriodicPurge() {
  // Micros, since periodic purge should typically take at most a few ms.
  SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Memory.PartitionAlloc.PeriodicPurge");
  TRACE_EVENT0("memory", "PeriodicPurge");
  auto& instance = internal::ThreadCacheRegistry::Instance();
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  instance.RunPeriodicPurge();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
@@ -175,7 +177,7 @@ void RunMemoryReclaimer(scoped_refptr<SequencedTaskRunner> task_runner) {
} // namespace

void StartThreadCachePeriodicPurge() {
  auto& instance = internal::ThreadCacheRegistry::Instance();
  auto& instance = ::partition_alloc::ThreadCacheRegistry::Instance();
  TimeDelta delay =
      Microseconds(instance.GetPeriodicPurgeNextIntervalInMicroseconds());
  ThreadTaskRunnerHandle::Get()->PostDelayedTask(

222
src/base/allocator/partition_allocator/BUILD.gn
Normal file
@@ -0,0 +1,222 @@
# Copyright (c) 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni")

# Add partition_alloc.gni and import it for partition_alloc configs.

config("partition_alloc_implementation") {
  # After introducing partition_alloc_export, replace BASE_IMPLEMENTATION with
  # PARTITION_ALLOC_IMPLEMENTATION.
  defines = [ "BASE_IMPLEMENTATION" ]
}

config("memory_tagging") {
  if (current_cpu == "arm64" && is_clang &&
      (is_linux || is_chromeos || is_android || is_fuchsia)) {
    # base/ has access to the MTE intrinsics because it needs to use them,
    # but they're not backwards compatible. Use base::CPU::has_mte()
    # beforehand to confirm or use indirect functions (ifuncs) to select
    # an MTE-specific implementation at dynamic link-time.
    cflags = [
      "-Xclang",
      "-target-feature",
      "-Xclang",
      "+mte",
    ]
  }
}

if (is_fuchsia) {
  config("fuchsia_sync_lib") {
    libs = [
      "sync",  # Used by spinning_mutex.h.
    ]
  }
}

if (make_partition_alloc_standalone) {
  partition_alloc_target_type = "component"
} else {
  partition_alloc_target_type = "source_set"
}

target(partition_alloc_target_type, "partition_alloc") {
  sources = [
    "address_pool_manager.cc",
    "address_pool_manager.h",
    "address_pool_manager_bitmap.cc",
    "address_pool_manager_bitmap.h",
    "address_pool_manager_types.h",
    "address_space_randomization.cc",
    "address_space_randomization.h",
    "address_space_stats.h",
    "allocation_guard.cc",
    "allocation_guard.h",
    "base/bits.h",
    "base/migration_adapter.h",
    "base/sys_byteorder.h",
    "dangling_raw_ptr_checks.cc",
    "dangling_raw_ptr_checks.h",
    "extended_api.cc",
    "extended_api.h",
    "memory_reclaimer.cc",
    "memory_reclaimer.h",
    "oom.cc",
    "oom.h",
    "oom_callback.cc",
    "oom_callback.h",
    "page_allocator.cc",
    "page_allocator.h",
    "page_allocator_constants.h",
    "page_allocator_internal.h",
    "partition_address_space.cc",
    "partition_address_space.h",
    "partition_alloc-inl.h",
    "partition_alloc.cc",
    "partition_alloc.h",
    "partition_alloc_check.h",
    "partition_alloc_config.h",
    "partition_alloc_constants.h",
    "partition_alloc_forward.h",
    "partition_alloc_hooks.cc",
    "partition_alloc_hooks.h",
    "partition_alloc_notreached.h",
    "partition_bucket.cc",
    "partition_bucket.h",
    "partition_bucket_lookup.h",
    "partition_cookie.h",
    "partition_direct_map_extent.h",
    "partition_freelist_entry.h",
    "partition_lock.h",
    "partition_oom.cc",
    "partition_oom.h",
    "partition_page.cc",
    "partition_page.h",
    "partition_ref_count.h",
    "partition_root.cc",
    "partition_root.h",
    "partition_stats.cc",
    "partition_stats.h",
    "partition_tag.h",
    "partition_tag_bitmap.h",
    "partition_tls.h",
    "random.cc",
    "random.h",
    "reservation_offset_table.cc",
    "reservation_offset_table.h",
    "spinning_mutex.cc",
    "spinning_mutex.h",
    "starscan/logging.h",
    "starscan/metadata_allocator.cc",
    "starscan/metadata_allocator.h",
    "starscan/pcscan.cc",
    "starscan/pcscan.h",
    "starscan/pcscan_internal.cc",
    "starscan/pcscan_internal.h",
    "starscan/pcscan_scheduling.cc",
    "starscan/pcscan_scheduling.h",
    "starscan/raceful_worklist.h",
    "starscan/scan_loop.h",
    "starscan/snapshot.cc",
    "starscan/snapshot.h",
    "starscan/stack/stack.cc",
    "starscan/stack/stack.h",
    "starscan/starscan_fwd.h",
    "starscan/state_bitmap.h",
    "starscan/stats_collector.cc",
    "starscan/stats_collector.h",
    "starscan/stats_reporter.h",
    "starscan/write_protector.cc",
    "starscan/write_protector.h",
    "tagging.cc",
    "tagging.h",
    "thread_cache.cc",
    "thread_cache.h",
    "yield_processor.h",
  ]
  defines = []
  if (is_win) {
    sources += [
      "page_allocator_internals_win.h",
      "partition_tls_win.cc",
    ]
  } else if (is_posix) {
    sources += [
      "page_allocator_internals_posix.cc",
      "page_allocator_internals_posix.h",
    ]
  } else if (is_fuchsia) {
    sources += [ "page_allocator_internals_fuchsia.h" ]
  }
  if (current_cpu == "x64") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ]
  } else if (current_cpu == "x86") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ]
  } else if (current_cpu == "arm") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ]
  } else if (current_cpu == "arm64") {
    defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
    sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ]
  } else {
    # To support a trampoline for another arch, please refer to v8/src/heap/base.
  }
  public_deps = [
    "//base:debugging_buildflags",
    "//base:logging_buildflags",
    "//base:synchronization_buildflags",
    "//base:tracing_buildflags",
    "//base/allocator:buildflags",
    "//build:branding_buildflags",
    "//build:chromecast_buildflags",
    "//build:chromeos_buildflags",
    "//build/config/compiler:compiler_buildflags",
  ]
  deps = []
  configs += [
    ":partition_alloc_implementation",
    ":memory_tagging",
  ]
  public_configs = []
  if (is_android) {
    # tagging.cc requires __arm_mte_set_* functions.
    deps += [ "//third_party/android_ndk:cpu_features" ]
  }
  if (is_fuchsia) {
    public_deps += [
      "//third_party/fuchsia-sdk/sdk/pkg/fit",
      "//third_party/fuchsia-sdk/sdk/pkg/sync",
      "//third_party/fuchsia-sdk/sdk/pkg/zx",
    ]

    # Needed for users of spinning_mutex.h, which for performance reasons,
    # contains inlined calls to `libsync` inside the header file.
    # It appends an entry to the "libs" section of the dependent target.
    public_configs += [ ":fuchsia_sync_lib" ]
  }

  frameworks = []
  if (is_mac) {
    # SecTaskGetCodeSignStatus needs:
    frameworks += [ "Security.framework" ]
  }
}

# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").

# TODO(crbug.com/1151236): Generate partition_alloc_buildflags. The following
# flags will be defined by the buildflags:
# "USE_BACKUP_REF_PTR=$_use_backup_ref_ptr",
# "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$_enable_backup_ref_ptr_slow_checks",
# "ENABLE_DANGLING_RAW_PTR_CHECKS=$_enable_dangling_raw_ptr_checks",
# "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
# "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
# "RECORD_ALLOC_INFO=$_record_alloc_info",

@@ -8,7 +8,6 @@ include_rules = [
  "+base/allocator/buildflags.h",
  "+base/base_export.h",
  "+base/bind.h",
  "+base/bits.h",
  "+base/callback.h",
  "+base/check.h",
  "+base/check_op.h",
@@ -37,7 +36,6 @@ include_rules = [
  "+base/process/memory.h",
  "+base/rand_util.h",
  "+base/strings/stringprintf.h",
  "+base/sys_byteorder.h",
  "+base/system/sys_info.h",
  "+base/test/bind.h",
  "+base/test/gtest_util.h",

@@ -15,6 +15,7 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/base_export.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"

@@ -15,6 +15,7 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/base_export.h"
#include "build/build_config.h"

#if !defined(PA_HAS_64_BITS_POINTERS)

@@ -121,6 +121,21 @@ AslrMask(uintptr_t bits) {
  return AslrAddress(0x20000000ULL);
}

#elif BUILDFLAG(IS_LINUX)

// Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on
// page size and number of levels of translation pages used. We use
// 39-bit as base as all setups should support this, lowered to 38-bit
// as ASLROffset() could cause a carry.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLRMask() {
  return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() {
  return AslrAddress(0x1000000000ULL);
}

#else

// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()

@@ -6,6 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_

#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "build/build_config.h"

namespace partition_alloc {
@@ -13,14 +14,14 @@ namespace partition_alloc {
#if defined(PA_HAS_ALLOCATION_GUARD)

// Disallow allocations in the scope. Does not nest.
class ScopedDisallowAllocations {
class BASE_EXPORT ScopedDisallowAllocations {
 public:
  ScopedDisallowAllocations();
  ~ScopedDisallowAllocations();
};

// Allow allocations in the scope. Does not nest.
class ScopedAllowAllocations {
class BASE_EXPORT ScopedAllowAllocations {
 public:
  ScopedAllowAllocations();
  ~ScopedAllowAllocations();
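For context, the guard classes declared above follow the usual RAII scope-guard shape. A minimal standalone sketch, assuming a plain thread-local flag (all names here are illustrative; PartitionAlloc's real guard sits behind `PA_HAS_ALLOCATION_GUARD` and is not reproduced verbatim):

```cpp
// Sketch of a scoped allocation guard in the spirit of
// ScopedDisallowAllocations / ScopedAllowAllocations above.
#include <cassert>

namespace sketch {

thread_local bool g_disallowed = false;  // hypothetical per-thread flag

class ScopedDisallowAllocations {
 public:
  ScopedDisallowAllocations() {
    assert(!g_disallowed && "does not nest");  // mirrors the "Does not nest" contract
    g_disallowed = true;
  }
  ~ScopedDisallowAllocations() { g_disallowed = false; }
};

class ScopedAllowAllocations {
 public:
  ScopedAllowAllocations() : previous_(g_disallowed) { g_disallowed = false; }
  ~ScopedAllowAllocations() { g_disallowed = previous_; }

 private:
  bool previous_;
};

// An allocator hook would consult the flag before servicing a request:
inline void AssertAllocationAllowed() { assert(!g_disallowed); }

}  // namespace sketch
```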
240
src/base/allocator/partition_allocator/base/bits.h
Normal file
@@ -0,0 +1,240 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file defines some bit utilities.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_

#include <climits>
#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "base/allocator/partition_allocator/base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"

#if defined(COMPILER_MSVC)
#include <intrin.h>
#endif

namespace partition_alloc::internal::base::bits {

// Returns true iff |value| is a power of 2.
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
constexpr bool IsPowerOfTwo(T value) {
  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
  //
  // Only positive integers with a single bit set are powers of two. If only one
  // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
  // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
  // |x & (x-1)| is 0 iff x is a power of two.
  return value > 0 && (value & (value - 1)) == 0;
}

// Round down |size| to a multiple of alignment, which must be a power of two.
inline constexpr size_t AlignDown(size_t size, size_t alignment) {
  PA_DCHECK(IsPowerOfTwo(alignment));
  return size & ~(alignment - 1);
}

// Move |ptr| back to the previous multiple of alignment, which must be a power
// of two. Defined for types where sizeof(T) is one byte.
template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
inline T* AlignDown(T* ptr, size_t alignment) {
  return reinterpret_cast<T*>(
      AlignDown(reinterpret_cast<size_t>(ptr), alignment));
}

// Round up |size| to a multiple of alignment, which must be a power of two.
inline constexpr size_t AlignUp(size_t size, size_t alignment) {
  PA_DCHECK(IsPowerOfTwo(alignment));
  return (size + alignment - 1) & ~(alignment - 1);
}

// Advance |ptr| to the next multiple of alignment, which must be a power of
// two. Defined for types where sizeof(T) is one byte.
template <typename T, typename = typename std::enable_if<sizeof(T) == 1>::type>
inline T* AlignUp(T* ptr, size_t alignment) {
  return reinterpret_cast<T*>(
      AlignUp(reinterpret_cast<size_t>(ptr), alignment));
}

// CountLeadingZeroBits(value) returns the number of zero bits preceding the
// most significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 2
//
// CountTrailingZeroBits(value) returns the number of zero bits following the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// Example: 00100010 -> 1
//
// C does not have an operator to do this, but fortunately the various
// compilers have built-ins that map to fast underlying processor instructions.
//
// Prefer the clang path on Windows, as _BitScanReverse() and friends are not
// constexpr.
#if defined(COMPILER_MSVC) && !defined(__clang__)

template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
                            unsigned>::type
    CountLeadingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  return LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
             ? (31 - index - (32 - bits))
             : bits;
}

template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
                            unsigned>::type
    CountLeadingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
  return LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
             ? (63 - index)
             : 64;
#else
  uint32_t left = static_cast<uint32_t>(x >> 32);
  if (LIKELY(_BitScanReverse(&index, left)))
    return 31 - index;

  uint32_t right = static_cast<uint32_t>(x);
  if (LIKELY(_BitScanReverse(&index, right)))
    return 63 - index;

  return 64;
#endif
}

template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
                            unsigned>::type
    CountTrailingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
  return LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
                                                                   : bits;
}

template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
                            unsigned>::type
    CountTrailingZeroBits(T x) {
  static_assert(bits > 0, "invalid instantiation");
  unsigned long index;
// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
#if defined(ARCH_CPU_64_BITS)
  return LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
                                                                     : 64;
#else
  uint32_t right = static_cast<uint32_t>(x);
  if (LIKELY(_BitScanForward(&index, right)))
    return index;

  uint32_t left = static_cast<uint32_t>(x >> 32);
  if (LIKELY(_BitScanForward(&index, left)))
    return 32 + index;

  return 64;
#endif
}

ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
  return CountLeadingZeroBits(x);
}

ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
  return CountLeadingZeroBits(x);
}

#elif defined(COMPILER_GCC) || defined(__clang__)

// __builtin_clz has undefined behaviour for an input of 0, even though there's
// clearly a return value that makes sense, and even though some processor clz
// instructions have defined behaviour for 0. We could drop to raw __asm__ to
// do better, but we'll avoid doing that unless we see proof that we need to.
template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountLeadingZeroBits(T value) {
  static_assert(bits > 0, "invalid instantiation");
  return LIKELY(value)
             ? bits == 64
                   ? __builtin_clzll(static_cast<uint64_t>(value))
                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
             : bits;
}

template <typename T, unsigned bits = sizeof(T) * 8>
ALWAYS_INLINE constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountTrailingZeroBits(T value) {
  return LIKELY(value) ? bits == 64
                             ? __builtin_ctzll(static_cast<uint64_t>(value))
                             : __builtin_ctz(static_cast<uint32_t>(value))
                       : bits;
}

ALWAYS_INLINE constexpr uint32_t CountLeadingZeroBits32(uint32_t x) {
  return CountLeadingZeroBits(x);
}

ALWAYS_INLINE constexpr uint64_t CountLeadingZeroBits64(uint64_t x) {
  return CountLeadingZeroBits(x);
}

#endif

ALWAYS_INLINE constexpr size_t CountLeadingZeroBitsSizeT(size_t x) {
  return CountLeadingZeroBits(x);
}

ALWAYS_INLINE constexpr size_t CountTrailingZeroBitsSizeT(size_t x) {
  return CountTrailingZeroBits(x);
}

// Returns the integer i such that 2^i <= n < 2^(i+1).
//
// There is a common `BitLength` function, which returns the number of bits
// required to represent a value. Rather than implement that function,
// use `Log2Floor` and add 1 to the result.
constexpr int Log2Floor(uint32_t n) {
  return 31 - CountLeadingZeroBits(n);
}

// Returns the integer i such that 2^(i-1) < n <= 2^i.
constexpr int Log2Ceiling(uint32_t n) {
  // When n == 0, we want the function to return -1.
  // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
  // why the statement below starts with (n ? 32 : -1).
  return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
}

// Returns a value of type T with a single bit set in the left-most position.
// Can be used instead of manually shifting a 1 to the left.
template <typename T>
constexpr T LeftmostBit() {
  static_assert(std::is_integral<T>::value,
                "This function can only be used with integral types.");
  T one(1u);
  return one << ((CHAR_BIT * sizeof(T) - 1));
}

} // namespace partition_alloc::internal::base::bits

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_

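For reference, the contracts documented in this header can be checked against standard C++20 `<bit>`, which exposes the same compiler builtins. A self-contained sketch (not part of the tree; the `AlignUp`/`AlignDown` bodies are restated locally so the file compiles on its own):

```cpp
// Verifies the documented behavior of the bit helpers using std::countl_zero
// and std::countr_zero from C++20 <bit>.
#include <bit>
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t AlignUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);  // alignment: power of two
}
constexpr size_t AlignDown(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  static_assert(AlignUp(30, 16) == 32);
  static_assert(AlignDown(30, 16) == 16);

  // CountLeadingZeroBits(0b00100010) == 2 for an 8-bit operand.
  assert(std::countl_zero(static_cast<uint8_t>(0b00100010)) == 2);
  // CountTrailingZeroBits(0b00100010) == 1.
  assert(std::countr_zero(static_cast<uint8_t>(0b00100010)) == 1);

  // Log2Floor(n) == 31 - clz(n); Log2Ceiling rounds up instead.
  assert(31 - std::countl_zero(uint32_t{48}) == 5);      // 2^5 <= 48 < 2^6
  assert(32 - std::countl_zero(uint32_t{48 - 1}) == 6);  // ceil(log2(48)) == 6
  return 0;
}
```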
132
src/base/allocator/partition_allocator/base/migration_adapter.h
Normal file
@@ -0,0 +1,132 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_

#include <cstdint>
#include <string>

#include "base/base_export.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_MAC)
#include <CoreFoundation/CoreFoundation.h>
#endif

namespace base {

class LapTimer;
class PlatformThread;
class PlatformThreadHandle;
class PlatformThreadRef;
class TimeDelta;
class TimeTicks;
class CPU;

template <typename Type, typename Traits>
class LazyInstance;

template <typename Type>
struct LazyInstanceTraitsBase;

template <typename T>
constexpr TimeDelta Seconds(T n);
template <typename T>
constexpr TimeDelta Milliseconds(T n);
template <typename T>
constexpr TimeDelta Microseconds(T n);

BASE_EXPORT uint64_t RandGenerator(uint64_t range);
BASE_EXPORT std::string StringPrintf(const char* format, ...);

template <typename T, typename O>
class NoDestructor;

namespace debug {

void BASE_EXPORT Alias(const void* var);

} // namespace debug

namespace internal {

template <typename T>
class CheckedNumeric;

}

#if BUILDFLAG(IS_MAC)

namespace internal {

template <typename CFT>
struct ScopedCFTypeRefTraits;

} // namespace internal

template <typename T, typename Traits>
class ScopedTypeRef;

namespace mac {

template <typename T>
T CFCast(const CFTypeRef& cf_val);
template <typename T>
T CFCastStrict(const CFTypeRef& cf_val);

bool IsAtLeastOS10_14();
bool IsOS10_11();

} // namespace mac
#endif // BUILDFLAG(IS_MAC)

} // namespace base

namespace partition_alloc::internal::base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::CPU;
using ::base::LapTimer;
using ::base::LazyInstance;
using ::base::LazyInstanceTraitsBase;
using ::base::Microseconds;
using ::base::Milliseconds;
using ::base::NoDestructor;
using ::base::PlatformThread;
using ::base::PlatformThreadHandle;
using ::base::PlatformThreadRef;
using ::base::RandGenerator;
using ::base::Seconds;
using ::base::StringPrintf;
using ::base::TimeDelta;
using ::base::TimeTicks;
using ::base::internal::CheckedNumeric;

#if BUILDFLAG(IS_MAC)
template <typename CFT>
using ScopedCFTypeRef =
    ::base::ScopedTypeRef<CFT, ::base::internal::ScopedCFTypeRefTraits<CFT>>;
#endif

namespace debug {

using ::base::debug::Alias;

} // namespace debug

#if BUILDFLAG(IS_MAC)
namespace mac {

using ::base::mac::CFCast;
using ::base::mac::IsAtLeastOS10_14;
using ::base::mac::IsOS10_11;

} // namespace mac
#endif // BUILDFLAG(IS_MAC)

} // namespace partition_alloc::internal::base

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_

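The adapter above stages a namespace move: callers switch to the new spelling first, and the `using` declarations are deleted once the definitions themselves move. A toy sketch of the same technique, with illustrative names only:

```cpp
// Namespace-migration via a temporary adapter, as in migration_adapter.h.
#include <cstdint>

namespace old_ns {
inline uint64_t RandGenerator(uint64_t range) { return range ? range - 1 : 0; }
class TimeDelta {};
}  // namespace old_ns

namespace new_ns {
// Temporary adapter: re-export old_ns names under the new spelling.
using ::old_ns::RandGenerator;
using ::old_ns::TimeDelta;
}  // namespace new_ns

int main() {
  // New code is written against new_ns from day one...
  new_ns::TimeDelta delta;
  (void)delta;
  // ...and keeps compiling unchanged when RandGenerator later moves into
  // new_ns for real and the 'using' declaration is deleted.
  return static_cast<int>(new_ns::RandGenerator(1));
}
```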
144
src/base/allocator/partition_allocator/base/sys_byteorder.h
Normal file
@@ -0,0 +1,144 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header defines cross-platform ByteSwap() implementations for 16, 32 and
// 64-bit values, and NetToHostXX() / HostToNetXX() functions equivalent to
// the traditional ntohX() and htonX() functions.
// Use the functions defined here rather than using the platform-specific
// functions directly.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_

#include <cstdint>

#include "base/allocator/partition_allocator/base/migration_adapter.h"
#include "build/build_config.h"

#if defined(COMPILER_MSVC)
#include <cstdlib>
#endif

namespace partition_alloc::internal::base {

// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
inline uint16_t ByteSwap(uint16_t x) {
#if defined(COMPILER_MSVC) && !defined(__clang__)
  return _byteswap_ushort(x);
#else
  return __builtin_bswap16(x);
#endif
}

inline uint32_t ByteSwap(uint32_t x) {
#if defined(COMPILER_MSVC) && !defined(__clang__)
  return _byteswap_ulong(x);
#else
  return __builtin_bswap32(x);
#endif
}

inline constexpr uint64_t ByteSwap(uint64_t x) {
  // Per build/build_config.h, clang masquerades as MSVC on Windows. If we are
  // actually using clang, we can rely on the builtin.
  //
  // This matters in practice, because on x86(_64), this is a single "bswap"
  // instruction. MSVC correctly replaces the call with an inlined bswap at /O2
  // as of 2021, but clang as we use it in Chromium doesn't, keeping a function
  // call for a single instruction.
#if defined(COMPILER_MSVC) && !defined(__clang__)
  return _byteswap_uint64(x);
#else
  return __builtin_bswap64(x);
#endif
}

inline constexpr uintptr_t ByteSwapUintPtrT(uintptr_t x) {
  // We do it this way because some build configurations are ILP32 even when
  // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
  // because these conditionals are constexprs, the irrelevant branches will
  // likely be optimized away, so this construction should not result in code
  // bloat.
  static_assert(sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8,
                "Unsupported uintptr_t size");
  if (sizeof(uintptr_t) == 4)
    return ByteSwap(static_cast<uint32_t>(x));
  return ByteSwap(static_cast<uint64_t>(x));
}

// Converts the bytes in |x| from host order (endianness) to little endian, and
// returns the result.
inline uint16_t ByteSwapToLE16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return x;
#else
  return ByteSwap(x);
#endif
}
inline uint32_t ByteSwapToLE32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return x;
#else
  return ByteSwap(x);
#endif
}
inline uint64_t ByteSwapToLE64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return x;
#else
  return ByteSwap(x);
#endif
}

// Converts the bytes in |x| from network to host order (endianness), and
// returns the result.
inline uint16_t NetToHost16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}
inline uint32_t NetToHost32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}
inline uint64_t NetToHost64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}

// Converts the bytes in |x| from host to network order (endianness), and
// returns the result.
inline uint16_t HostToNet16(uint16_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}
inline uint32_t HostToNet32(uint32_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}
inline uint64_t HostToNet64(uint64_t x) {
#if defined(ARCH_CPU_LITTLE_ENDIAN)
  return ByteSwap(x);
#else
  return x;
#endif
}

} // namespace partition_alloc::internal::base

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_

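As a sanity check on the conventions above, a standalone sketch (standard C++ only; the runtime assert assumes a little-endian host, and `ByteSwap16` restates the 16-bit swap locally rather than using the header):

```cpp
// ByteSwap reverses byte order; NetToHost/HostToNet are byte swaps on
// little-endian hosts and no-ops on big-endian ones.
#include <cassert>
#include <cstdint>

constexpr uint16_t ByteSwap16(uint16_t x) {
  return static_cast<uint16_t>((x << 8) | (x >> 8));
}

int main() {
  static_assert(ByteSwap16(0x1234) == 0x3412);

  // On a little-endian host, HostToNet16(0x1234) must place 0x12 first in
  // memory, which is what the big-endian wire format requires.
  uint16_t wire = ByteSwap16(0x1234);  // what HostToNet16 computes on LE
  const auto* bytes = reinterpret_cast<const unsigned char*>(&wire);
  assert(bytes[0] == 0x12 && bytes[1] == 0x34);  // little-endian host assumed
  return 0;
}
```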
90
src/base/allocator/partition_allocator/build_config.md
Normal file
@@ -0,0 +1,90 @@
# Build Config

PartitionAlloc's behavior and operation can be influenced by many
different settings. Broadly, these are controlled at the top-level by
[GN args][gn-declare-args], which propagate via
[buildflags][buildflag-header] and `#defined` clauses.

*** promo
Most of what you'll want to know exists between

* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
* [`allocator.gni`][allocator-gni],
* [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and
* [`//base/BUILD.gn`][base-build-gn].
***

*** aside
While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of
PartitionAlloc's behavior is governed by compound conditions `#defined`
in [`partition_alloc_config.h`][partition-alloc-config].
***

## Select GN Args

### `use_partition_alloc`

Defines whether PartitionAlloc is at all available.

Setting this `false` will entirely remove PartitionAlloc from the
Chromium build. _You probably do not want this._

*** note
Back when PartitionAlloc was the dedicated allocator in Blink, disabling
it was logically identical to wholly disabling it in Chromium. This GN
arg organically grew in scope with the advent of
PartitionAlloc-Everywhere and must be `true` as a prerequisite for
enabling PA-E.
***

### `use_allocator`

Does nothing special when value is `"none"`. Enables
[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is
`"partition"`.

*** note
* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically
  includes Blink where PartitionAlloc originated, setting
  `use_allocator = "none"` does not disable PA usage in Blink.
* `use_allocator = "partition"` internally sets
  `use_partition_alloc_as_malloc = true`, which must not be confused
  with `use_partition_alloc` (see above).
***

### `use_backup_ref_ptr`

Specifies `BackupRefPtr` as the implementation for `base::raw_ptr<T>`
when `true`. See the [MiraclePtr documentation][miracleptr-doc].

*** aside
BRP requires support from PartitionAlloc, so `use_backup_ref_ptr` also
compiles the relevant code into PA. However, this arg does _not_ govern
whether or not BRP is actually enabled at runtime - that functionality
is controlled by a Finch flag.
***

## Note: Component Builds

When working on PartitionAlloc, know that `is_debug` defaults to
implying `is_component_build`, which interferes with the allocator
shim. A typical set of GN args should include

```none
is_debug = true
is_component_build = false
```

Conversely, build configurations that have `is_component_build = true`
without explicitly specifying PA-specific args will not build with PA-E
enabled.

[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h
[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview
[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview

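The propagation chain the document describes (GN arg, then a generated buildflag header, then an `#if` in C++) can be sketched end to end in one file. The two macros below mirror Chromium's actual `build/buildflag.h`; the `BUILDFLAG_INTERNAL_...` line is hand-written here to stand in for what `buildflag_header()` would generate, so treat the flag name as illustrative:

```cpp
// Sketch of the GN-arg -> buildflag -> #if pipeline.
#include <cstdio>

// These two macros match the mechanism in build/buildflag.h.
#define BUILDFLAG_CAT_INDIRECT(a, b) a##b
#define BUILDFLAG(flag) (BUILDFLAG_CAT_INDIRECT(BUILDFLAG_INTERNAL_, flag)())

// Stand-in for the generated header that `use_backup_ref_ptr = true`
// in args.gn would produce.
#define BUILDFLAG_INTERNAL_USE_BACKUP_REF_PTR() (1)

#if BUILDFLAG(USE_BACKUP_REF_PTR)
static const char* kRawPtrImpl = "BackupRefPtr";  // compiled in when true
#else
static const char* kRawPtrImpl = "no-op";
#endif

int main() {
  std::puts(kRawPtrImpl);
  return 0;
}
```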
@@ -8,7 +8,7 @@
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/thread_cache.h"

namespace base {
namespace partition_alloc::internal {

#if defined(PA_THREAD_CACHE_SUPPORTED)

@@ -20,7 +20,7 @@ void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
  if (!root || !root->with_thread_cache)
    return;

  internal::ThreadCacheRegistry::Instance().PurgeAll();
  ThreadCacheRegistry::Instance().PurgeAll();
  root->with_thread_cache = false;
  // Doesn't destroy the thread cache object(s). For background threads, they
  // will be collected (and free cached memory) at thread destruction
@@ -36,13 +36,14 @@ void EnablePartitionAllocThreadCacheForRootIfDisabled(

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void DisablePartitionAllocThreadCacheForProcess() {
  auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
  auto* aligned_allocator = internal::PartitionAllocMalloc::AlignedAllocator();
  auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
  auto* aligned_allocator =
      ::base::internal::PartitionAllocMalloc::AlignedAllocator();
  DisableThreadCacheForRootIfEnabled(regular_allocator);
  if (aligned_allocator != regular_allocator)
    DisableThreadCacheForRootIfEnabled(aligned_allocator);
  DisableThreadCacheForRootIfEnabled(
      internal::PartitionAllocMalloc::OriginalAllocator());
      ::base::internal::PartitionAllocMalloc::OriginalAllocator());
}
#endif // defined(USE_PARTITION_ALLOC_AS_MALLOC)

@@ -56,10 +57,10 @@ void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  DisablePartitionAllocThreadCacheForProcess();
#else
  PA_CHECK(!internal::ThreadCache::IsValid(internal::ThreadCache::Get()));
  PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  internal::ThreadCache::SwapForTesting(root);
  ThreadCache::SwapForTesting(root);
  EnablePartitionAllocThreadCacheForRootIfDisabled(root);

#endif // defined(PA_THREAD_CACHE_SUPPORTED)
@@ -72,15 +73,15 @@ void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
  DisableThreadCacheForRootIfEnabled(root);

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  auto* regular_allocator = internal::PartitionAllocMalloc::Allocator();
  auto* regular_allocator = ::base::internal::PartitionAllocMalloc::Allocator();
  EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);

  internal::ThreadCache::SwapForTesting(regular_allocator);
  ThreadCache::SwapForTesting(regular_allocator);
#else
  internal::ThreadCache::SwapForTesting(nullptr);
  ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}

} // namespace base
} // namespace partition_alloc::internal

@@ -9,7 +9,7 @@
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"

namespace base {
namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
@@ -22,6 +22,6 @@ BASE_EXPORT void SwapOutProcessThreadCacheForTesting(
BASE_EXPORT void SwapInProcessThreadCacheForTesting(
    ThreadSafePartitionRoot* root);

} // namespace base
} // namespace partition_alloc::internal

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

@@ -81,7 +81,7 @@ void MemoryReclaimer::Reclaim(int flags) {
  // as there is periodic purge which makes sure that it doesn't take too much
  // space.
  if (flags & PurgeFlags::kAggressiveReclaim)
    base::internal::ThreadCacheRegistry::Instance().PurgeAll();
    ThreadCacheRegistry::Instance().PurgeAll();
#endif

  for (auto* partition : partitions_)

@@ -10,6 +10,7 @@

#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/base_export.h"
#include "base/no_destructor.h"
#include "base/thread_annotations.h"
#include "base/time/time.h"

@@ -8,10 +8,10 @@
#include <cstdint>

#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/bits.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
@@ -76,7 +76,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
                      uintptr_t alignment_offset,
                      PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(base_length >= trim_length);
  PA_DCHECK(base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(alignment_offset < alignment);
  uintptr_t new_base =
      NextAlignedWithOffset(base_address, alignment, alignment_offset);
@@ -105,7 +105,7 @@ uintptr_t TrimMapping(uintptr_t base_address,
uintptr_t NextAlignedWithOffset(uintptr_t address,
                                uintptr_t alignment,
                                uintptr_t requested_offset) {
  PA_DCHECK(base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(requested_offset < alignment);

  uintptr_t actual_offset = address & (alignment - 1);
@@ -175,7 +175,7 @@ uintptr_t AllocPagesWithAlignOffset(
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(align >= internal::PageAllocationGranularity());
  // Alignment must be power of 2 for masking math to work.
  PA_DCHECK(base::bits::IsPowerOfTwo(align));
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
  PA_DCHECK(align_offset < align);
  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));

@@ -24,6 +24,31 @@
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))

#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
// This should work for all POSIX (if needed), but currently all other
// supported OS/architecture combinations use either hard-coded values
// (such as x86) or have means to determine these values without needing
// atomics (such as macOS on arm64).

// Page allocator constants are run-time constant
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))

#include <unistd.h>
#include <atomic>

namespace partition_alloc::internal {

// Holds the current page size and shift, where size = 1 << shift
// Use PageAllocationGranularity(), PageAllocationGranularityShift()
// to initialize and retrieve these values safely.
struct PageCharacteristics {
  std::atomic<int> size;
  std::atomic<int> shift;
};
extern PageCharacteristics page_characteristics;

} // namespace partition_alloc::internal

#else

// When defined, page size constants are fixed at compile time. When not
@@ -38,6 +63,10 @@

namespace partition_alloc::internal {

// Forward declaration, implementation below
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularity();

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
@@ -50,6 +79,15 @@ PageAllocationGranularityShift() {
  return 14; // 16kB
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
  return vm_page_shift;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // arm64 supports 4kb (shift = 12), 16kb (shift = 14), and 64kb (shift = 16)
  // page sizes. Retrieve from or initialize cache.
  int shift = page_characteristics.shift.load(std::memory_order_relaxed);
  if (UNLIKELY(shift == 0)) {
    shift = __builtin_ctz((int)PageAllocationGranularity());
    page_characteristics.shift.store(shift, std::memory_order_relaxed);
  }
  return shift;
#else
  return 12; // 4kB
#endif
@@ -59,8 +97,17 @@ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
  // This is literally equivalent to |1 << PageAllocationGranularityShift()|
  // below, but was separated out for OS_APPLE to avoid << on a non-constexpr.
  // below, but was separated out for IS_APPLE to avoid << on a non-constexpr.
  return vm_page_size;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // arm64 supports 4kb, 16kb, and 64kb page sizes. Retrieve from or
  // initialize cache.
  int size = page_characteristics.size.load(std::memory_order_relaxed);
  if (UNLIKELY(size == 0)) {
    size = getpagesize();
    page_characteristics.size.store(size, std::memory_order_relaxed);
  }
  return size;
#else
  return 1 << PageAllocationGranularityShift();
#endif
@@ -90,9 +137,11 @@ SystemPageShift() {

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
SystemPageSize() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
  // This is literally equivalent to |1 << SystemPageShift()| below, but was
  // separated out for 64-bit OS_APPLE to avoid << on a non-constexpr.
  // separated out for 64-bit IS_APPLE and arm64 on Linux to avoid << on a
  // non-constexpr.
  return PageAllocationGranularity();
#else
  return 1 << SystemPageShift();

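The arm64-Linux branches above cache the page size at first use. A standalone sketch of the same relaxed-atomic caching pattern (names are illustrative; `getpagesize()` is POSIX, and `__builtin_ctz` is the GCC/Clang builtin the original also uses). Racing initializers all store the same value, so relaxed ordering is sufficient:

```cpp
// Lazy, lock-free caching of a run-time constant (the page size).
#include <atomic>
#include <unistd.h>

namespace {
std::atomic<int> g_page_size{0};  // 0 means "not initialized yet"
}

int CachedPageSize() {
  int size = g_page_size.load(std::memory_order_relaxed);
  if (size == 0) {  // UNLIKELY() in the original
    size = static_cast<int>(getpagesize());  // first caller pays the cost
    g_page_size.store(size, std::memory_order_relaxed);
  }
  return size;
}

int CachedPageShift() {
  // Page sizes are powers of two, so the shift is just ctz(size).
  return __builtin_ctz(static_cast<unsigned>(CachedPageSize()));
}
```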
@@ -9,12 +9,12 @@
#include <ostream>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h"
@@ -43,7 +43,7 @@ NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {

NOINLINE void HandleGigaCageAllocFailure() {
  NO_CODE_FOLDING();
  uint32_t alloc_page_error_code = base::GetAllocPageErrorCode();
  uint32_t alloc_page_error_code = GetAllocPageErrorCode();
  PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
  // It's important to easily differentiate these two failures on Windows, so
  // crash with different stacks.
@@ -184,6 +184,12 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() {
  setup_.configurable_pool_ = 0;
}

#if BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

PageCharacteristics page_characteristics;

#endif // BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)

#endif // defined(PA_HAS_64_BITS_POINTERS)

} // namespace partition_alloc::internal

@@ -11,6 +11,7 @@

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
@@ -19,7 +20,6 @@
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#include "build/buildflag.h"

@@ -22,44 +22,52 @@
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/dcheck_is_on.h"

namespace base {
namespace partition_alloc {

void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
  // This is from page_allocator_constants.h and doesn't really fit here, but
  // there isn't a centralized initialization function in page_allocator.cc, so
  // there's no good place in that file to do a STATIC_ASSERT_OR_PA_CHECK.
  STATIC_ASSERT_OR_PA_CHECK((SystemPageSize() & SystemPageOffsetMask()) == 0,
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::SystemPageSize() & internal::SystemPageOffsetMask()) == 0,
      "SystemPageSize() must be power of 2");

  // Two partition pages are used as guard / metadata page so make sure the
  // super page size is bigger.
  STATIC_ASSERT_OR_PA_CHECK(PartitionPageSize() * 4 <= kSuperPageSize,
  STATIC_ASSERT_OR_PA_CHECK(
      internal::PartitionPageSize() * 4 <= internal::kSuperPageSize,
      "ok super page size");
  STATIC_ASSERT_OR_PA_CHECK((kSuperPageSize & SystemPageOffsetMask()) == 0,
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::kSuperPageSize & internal::SystemPageOffsetMask()) == 0,
      "ok super page multiple");
  // Four system pages gives us room to hack out a still-guard-paged piece
  // of metadata in the middle of a guard partition page.
  STATIC_ASSERT_OR_PA_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
  STATIC_ASSERT_OR_PA_CHECK(
      internal::SystemPageSize() * 4 <= internal::PartitionPageSize(),
      "ok partition page size");
  STATIC_ASSERT_OR_PA_CHECK((PartitionPageSize() & SystemPageOffsetMask()) == 0,
  STATIC_ASSERT_OR_PA_CHECK(
      (internal::PartitionPageSize() & internal::SystemPageOffsetMask()) == 0,
      "ok partition page multiple");
  static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
                    kPageMetadataSize,
                    internal::kPageMetadataSize,
                "PartitionPage should not be too big");
  STATIC_ASSERT_OR_PA_CHECK(
      kPageMetadataSize * NumPartitionPagesPerSuperPage() <= SystemPageSize(),
      internal::kPageMetadataSize * internal::NumPartitionPagesPerSuperPage() <=
          internal::SystemPageSize(),
      "page metadata fits in hole");

  // Limit to prevent callers accidentally overflowing an int size.
  STATIC_ASSERT_OR_PA_CHECK(
      MaxDirectMapped() <= (1UL << 31) + DirectMapAllocationGranularity(),
      internal::MaxDirectMapped() <=
          (1UL << 31) + internal::DirectMapAllocationGranularity(),
      "maximum direct mapped allocation");

  // Check that some of our zanier calculations worked out as expected.
  static_assert(kSmallestBucket == kAlignment, "generic smallest bucket");
  static_assert(kMaxBucketed == 917504, "generic max bucketed");
  static_assert(internal::kSmallestBucket == internal::kAlignment,
                "generic smallest bucket");
  static_assert(internal::kMaxBucketed == 917504, "generic max bucketed");
  STATIC_ASSERT_OR_PA_CHECK(
      MaxSystemPagesPerRegularSlotSpan() <= 16,
      internal::MaxSystemPagesPerRegularSlotSpan() <= 16,
      "System pages per slot span must be no greater than 16.");

  PA_DCHECK(on_out_of_memory);
@@ -110,4 +118,4 @@ void CheckThatSlotOffsetIsZero(uintptr_t address) {

} // namespace internal

} // namespace base
} // namespace partition_alloc

14
src/base/allocator/partition_allocator/partition_alloc.gni
Normal file
@@ -0,0 +1,14 @@
# Copyright (c) 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

declare_args() {
  make_partition_alloc_standalone = false
}

# TODO(): move partition allocator's own args defined by
# //base/allocator/allocator.gni here. e.g. put_ref_count_in_previous_slot
# (this changes partition alloc's behavior.)
# On the other hand, partition_alloc_as_malloc is related to allocator_shim.
# Caller sides should init PA-E. e.g. PartitionAllocMallocInitOnce()
@@ -11,7 +11,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"

namespace base {
namespace partition_alloc {

BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
@@ -37,14 +37,16 @@ struct BASE_EXPORT PartitionAllocator {

using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;

}  // namespace base
}  // namespace partition_alloc

namespace partition_alloc {
namespace base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::PartitionAllocator;
using ::partition_alloc::PartitionAllocator;
using ::partition_alloc::PartitionAllocGlobalInit;
using ::partition_alloc::PartitionAllocGlobalUninitForTesting;

}  // namespace partition_alloc
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
@@ -12,6 +12,7 @@
#include "base/check.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h"
#include "build/build_config.h"

#define PA_STRINGIFY_IMPL(s) #s
#define PA_STRINGIFY(s) PA_STRINGIFY_IMPL(s)
@@ -96,10 +97,19 @@

#endif

// alignas(16) DebugKv causes breakpad_unittests and sandbox_linux_unittests
// failures on android-marshmallow-x86-rel because of SIGSEGV.
#if BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_X86_FAMILY) && \
    defined(ARCH_CPU_32_BITS)
#define PA_DEBUGKV_ALIGN alignas(8)
#else
#define PA_DEBUGKV_ALIGN alignas(16)
#endif

namespace partition_alloc::internal {

// Used for PA_DEBUG_DATA_ON_STACK, below.
struct alignas(16) DebugKv {
struct PA_DEBUGKV_ALIGN DebugKv {
  // 16 bytes object aligned on 16 bytes, to make it easier to see in crash
  // reports.
  char k[8] = {};  // Not necessarily 0-terminated.
@@ -205,6 +205,16 @@ constexpr bool kUseLazyCommit = false;
#define PA_REF_COUNT_CHECK_COOKIE
#endif

// Use available space in the reference count to store the initially requested
// size from the application. This is used for debugging, hence disabled by
// default.
// #define PA_REF_COUNT_STORE_REQUESTED_SIZE

#if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE) && \
    defined(PA_REF_COUNT_CHECK_COOKIE)
#error "Cannot use a cookie *and* store the allocation size"
#endif

// Prefer smaller slot spans.
//
// Smaller slot spans may improve dirty memory fragmentation, but may also
@@ -212,8 +222,21 @@ constexpr bool kUseLazyCommit = false;
//
// This is intended to roll out more broadly, but only enabled on Linux for now
// to get performance bot and real-world data pre-A/B experiment.
#if BUILDFLAG(IS_LINUX)
//
// Also enabled on ARM64 macOS, as the 16kiB pages on this platform lead to
// larger slot spans.
#if BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
#define PA_PREFER_SMALLER_SLOT_SPANS
#endif  // BUILDFLAG(IS_LINUX)

// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(USE_MTE_CHECKED_PTR) && defined(PA_HAS_64_BITS_POINTERS) && \
    !defined(PA_HAS_MEMORY_TAGGING)
#define PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS
#endif  // BUILDFLAG(USE_MTE_CHECKED_PTR) && defined(PA_HAS_64_BITS_POINTERS) &&
        // !defined(PA_HAS_MEMORY_TAGGING)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONFIG_H_
@@ -79,10 +79,11 @@ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
PartitionPageShift() {
  return vm_page_shift + 2;
  return PageAllocationGranularityShift() + 2;
}
#else
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
@@ -151,6 +152,7 @@ MaxRegularSlotSpanSize() {
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | TagBitmap |
// | *Scan State Bitmap |
// | Slot span |
// | Slot span |
@@ -159,7 +161,9 @@ MaxRegularSlotSpanSize() {
// | Guard pages (16 KiB) |
// +-----------------------+
//
// State Bitmap is inserted for partitions that may have quarantine enabled.
// TagBitmap is only present when
// defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS) is true. State Bitmap
// is inserted for partitions that may have quarantine enabled.
//
// If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
// after the Metadata page for BackupRefPtr. The guard pages after the bitmap
@@ -237,11 +241,14 @@ constexpr size_t kNumPools = 3;
// to keep for now only because nothing uses PartitionAlloc on iOS yet.
#if BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = kGiB / 4;
#elif BUILDFLAG(IS_MAC)
#elif BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
// Special-case macOS. Contrary to other platforms, there is no sandbox limit
// there, meaning that a single renderer could "happily" consume >8GiB. So the
// 8GiB pool size is a regression. Make the limit higher on this platform only
// to be consistent with previous behavior. See crbug.com/1232567 for details.
//
// On Linux, reserving memory is not costly, and we have cases where heaps can
// grow to more than 8GiB without being a memory leak.
constexpr size_t kPoolMaxSize = 16 * kGiB;
#else
constexpr size_t kPoolMaxSize = 8 * kGiB;
@@ -353,7 +360,6 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2GiB to make sure that a 2GiB allocation
// fails. This is a security choice in Chrome, to help making size_t vs int bugs
// harder to exploit.
//

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxDirectMapped() {
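A worked example of the PartitionPageShift() arithmetic introduced above, under an assumed page size (the constants here are illustrative, not the library's):

#include <cstddef>

// On Apple arm64 / Linux arm64 with 16 kiB pages the allocation granularity
// shift is 14, so a partition page is 1 << (14 + 2) = 64 kiB: always four
// granules, matching the 4 KiB-page platforms where it is 16 kiB.
constexpr size_t kAssumedGranularityShift = 14;  // 16 kiB pages (assumption).
constexpr size_t kSketchPartitionPageShift = kAssumedGranularityShift + 2;
static_assert((size_t{1} << kSketchPartitionPageShift) == 64 * 1024,
              "a partition page spans four allocation granules");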
@@ -44,17 +44,29 @@ constexpr bool ThreadSafe = true;
template <bool thread_safe>
struct SlotSpanMetadata;

#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
void CheckThatSlotOffsetIsZero(uintptr_t address);
#endif

}  // namespace internal

class PartitionStatsDumper;

template <bool thread_safe = internal::ThreadSafe>
struct PartitionRoot;

using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;

}  // namespace partition_alloc

namespace base {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::PartitionRoot;
using ::partition_alloc::PartitionStatsDumper;
using ::partition_alloc::ThreadSafePartitionRoot;
using ::partition_alloc::internal::kAlignment;

namespace internal {
@@ -64,35 +76,13 @@ using ::partition_alloc::internal::ThreadSafe;

#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
BASE_EXPORT void CheckThatSlotOffsetIsZero(uintptr_t address);
using ::partition_alloc::internal::CheckThatSlotOffsetIsZero;
#endif

}  // namespace internal

template <bool thread_safe = true>
struct PartitionRoot;

using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;

}  // namespace base

namespace partition_alloc {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::PartitionRoot;

namespace internal {

#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
    BUILDFLAG(USE_BACKUP_REF_PTR)
using ::base::internal::CheckThatSlotOffsetIsZero;
#endif

}  // namespace internal

}  // namespace partition_alloc

// From https://clang.llvm.org/docs/AttributeReference.html#malloc:
//
// The malloc attribute indicates that the function acts like a system memory
@@ -8,6 +8,7 @@
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
@@ -19,10 +20,11 @@
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
#include "base/check.h"
#include "base/debug/alias.h"
#include "build/build_config.h"
@@ -634,6 +636,28 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
  // Double check that we had enough space in the super page for the new slot
  // span.
  PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  PA_DCHECK(root->next_tag_bitmap_page);
  uintptr_t next_tag_bitmap_page =
      base::bits::AlignUp(reinterpret_cast<uintptr_t>(
                              PartitionTagPointer(root->next_partition_page)),
                          SystemPageSize());
  if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
#if DCHECK_IS_ON()
    uintptr_t super_page =
        reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
    uintptr_t tag_bitmap = super_page + PartitionPageSize();
    PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
    PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
#endif
    SetSystemPagesAccess(root->next_tag_bitmap_page,
                         next_tag_bitmap_page - root->next_tag_bitmap_page,
                         PageAccessibilityConfiguration::kReadWrite);
    root->next_tag_bitmap_page = next_tag_bitmap_page;
  }
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

  return slot_span;
}

@@ -666,7 +690,9 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
                                        std::memory_order_relaxed);

  root->next_super_page = super_page + kSuperPageSize;
  uintptr_t state_bitmap = super_page + PartitionPageSize();
  // TODO(crbug.com/1307514): Add direct map support.
  uintptr_t state_bitmap = super_page + PartitionPageSize() +
                           (is_direct_mapped() ? 0 : ReservedTagBitmapSize());
  PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
  const size_t state_bitmap_reservation_size =
      root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
@@ -745,6 +771,19 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
              payload < SuperPagesEndFromExtent(current_extent));
  }

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  // `root->next_partition_page` currently points at the start of the
  // super page payload. We point `root->next_tag_bitmap_page` to the
  // corresponding point in the tag bitmap and let the caller
  // (slot span allocation) take care of the rest.
  root->next_tag_bitmap_page =
      base::bits::AlignDown(reinterpret_cast<uintptr_t>(
                                PartitionTagPointer(root->next_partition_page)),
                            SystemPageSize());
  PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
      << "tag bitmap can never intrude on metadata partition page";
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

  // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
  // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
  // sure to register the super-page after it has been fully initialized.
@@ -841,6 +880,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
    return_slot =
        ::partition_alloc::internal::TagMemoryRangeRandomly(return_slot, size);
  }
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

  // Add all slots that fit within so far committed pages to the free list.
  PartitionFreelistEntry* prev_entry = nullptr;
@@ -851,6 +893,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
      next_slot =
          ::partition_alloc::internal::TagMemoryRangeRandomly(next_slot, size);
    }
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
    PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
    auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot);
    if (!slot_span->get_freelist_head()) {
      PA_DCHECK(!prev_entry);
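The lazy tag-bitmap commit above hinges on page-boundary rounding: AllocNewSuperPage() rounds the first tag pointer down (the containing page may already be partly in use), while AllocNewSlotSpan() rounds the next one up (protections only change in whole pages), and the pages in between get committed. A standalone sketch of that arithmetic, with a 4 kiB page size assumed for illustration:

#include <cstdint>

constexpr uintptr_t kSketchPage = 4096;
constexpr uintptr_t AlignDown(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
constexpr uintptr_t AlignUp(uintptr_t x, uintptr_t a) {
  return (x + a - 1) & ~(a - 1);
}
static_assert(AlignDown(0x12345, kSketchPage) == 0x12000,
              "round down to the start of the containing page");
static_assert(AlignUp(0x12345, kSketchPage) == 0x13000,
              "round up to the next page boundary");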
@@ -7,10 +7,10 @@

#include <cstdint>

#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/compiler_specific.h"

namespace partition_alloc::internal {
|
@ -9,16 +9,16 @@
|
||||
#include <cstdint>
|
||||
|
||||
#include "base/allocator/buildflags.h"
|
||||
#include "base/allocator/partition_allocator/base/bits.h"
|
||||
#include "base/allocator/partition_allocator/base/sys_byteorder.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_ref_count.h"
|
||||
#include "base/bits.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/dcheck_is_on.h"
|
||||
#include "base/immediate_crash.h"
|
||||
#include "base/sys_byteorder.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace partition_alloc::internal {
|
||||
@ -289,6 +289,13 @@ ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNextInternal(
|
||||
// |for_thread_cache|, since the argument is always a compile-time constant.
|
||||
if (UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
|
||||
if constexpr (crash_on_corruption) {
|
||||
// Put the corrupted data on the stack, it may give us more information
|
||||
// about what kind of corruption that was.
|
||||
PA_DEBUG_DATA_ON_STACK("first",
|
||||
static_cast<size_t>(encoded_next_.encoded_));
|
||||
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
|
||||
PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
|
||||
#endif
|
||||
FreelistCorruptionDetected(extra);
|
||||
} else {
|
||||
return nullptr;
|
||||
|
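The freelist hardening above selects its corruption policy at compile time: with `if constexpr`, the branch that is not taken is discarded during template instantiation, so the crash path costs nothing for callers that opted into a nullptr result. A standalone sketch of the pattern, with illustrative names:

#include <cstdlib>

template <bool crash_on_corruption, typename T>
T* GetNextChecked(T* candidate, bool looks_sane) {
  if (!looks_sane) {
    if constexpr (crash_on_corruption) {
      std::abort();    // Freelist corruption is unrecoverable: stop hard.
    } else {
      return nullptr;  // Caller chose to observe corruption gracefully.
    }
  }
  return candidate;
}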
@@ -9,6 +9,7 @@

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
@@ -20,7 +21,6 @@
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
#include "base/dcheck_is_on.h"

namespace partition_alloc::internal {
@@ -5,25 +5,26 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

#include <string.h>

#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/thread_annotations.h"
@@ -63,8 +64,8 @@ static_assert(
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE uintptr_t
SuperPagesBeginFromExtent(PartitionSuperPageExtentEntry<thread_safe>* extent) {
ALWAYS_INLINE uintptr_t SuperPagesBeginFromExtent(
    const PartitionSuperPageExtentEntry<thread_safe>* extent) {
  PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
  uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
  PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
@@ -77,13 +78,13 @@ SuperPagesBeginFromExtent(PartitionSuperPageExtentEntry<thread_safe>* extent) {
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE uintptr_t
SuperPagesEndFromExtent(PartitionSuperPageExtentEntry<thread_safe>* extent) {
ALWAYS_INLINE uintptr_t SuperPagesEndFromExtent(
    const PartitionSuperPageExtentEntry<thread_safe>* extent) {
  return SuperPagesBeginFromExtent(extent) +
         (extent->number_of_consecutive_super_pages * kSuperPageSize);
}

using AllocationStateMap = ::base::internal::
using AllocationStateMap =
    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;

// Metadata of the slot span.
@@ -134,6 +135,12 @@ struct __attribute__((packed)) SlotSpanMetadata {
  // PartitionPageSize() is 4 times the OS page size.
  static constexpr size_t kMaxSlotsPerSlotSpan =
      4 * (1 << 14) / kSmallestBucket;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
  // currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code,
  // so we use the 16 kiB maximum (64 kiB will crash).
  static constexpr size_t kMaxSlotsPerSlotSpan =
      4 * (1 << 14) / kSmallestBucket;
#else
  // A slot span can "span" multiple PartitionPages, but then its slot size is
  // larger, so it doesn't have as many slots.
@@ -410,17 +417,27 @@ CommittedStateBitmapSize() {
// caller's responsibility to ensure that the bitmaps even exist.
ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize();
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
}
ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(uintptr_t super_page) {
  return reinterpret_cast<AllocationStateMap*>(
      SuperPageStateBitmapAddr(super_page));
}

// Returns the address of the tag bitmap of the `super_page`. Caller must ensure
// that bitmap exists.
ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(IsReservationStart(super_page));
  // Skip over the guard pages / metadata.
  return super_page + PartitionPageSize();
}

ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
                                              bool with_quarantine) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0) +
         (with_quarantine ? ReservedStateBitmapSize() : 0);
}
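The additions above shift every payload-relative offset by the reserved tag bitmap on normal-bucket super pages. A worked example of the resulting layout, with sizes assumed purely for illustration (real values depend on platform and configuration):

#include <cstdint>

constexpr uintptr_t kSketchSuperPage = 0x40000000;     // Super-page base.
constexpr uintptr_t kSketchPartitionPage = 16 * 1024;  // Guard + metadata.
constexpr uintptr_t kSketchTagBitmap = 16 * 1024;      // Reserved tag bitmap.
constexpr uintptr_t kSketchStateBitmap = 16 * 1024;    // Reserved state bitmap.

// Tag bitmap starts right after the metadata partition page...
constexpr uintptr_t kTagBitmapAddr = kSketchSuperPage + kSketchPartitionPage;
// ...the state (quarantine) bitmap is pushed past it for normal buckets...
constexpr uintptr_t kStateBitmapAddr = kTagBitmapAddr + kSketchTagBitmap;
// ...and the payload begins after both when quarantine is enabled.
constexpr uintptr_t kPayloadBegin = kStateBitmapAddr + kSketchStateBitmap;
static_assert(kPayloadBegin - kSketchSuperPage == 48 * 1024,
              "metadata + tag bitmap + state bitmap precede the payload");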
|
@ -220,6 +220,13 @@ class BASE_EXPORT PartitionRefCount {
|
||||
return alive;
|
||||
}
|
||||
|
||||
#if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
|
||||
ALWAYS_INLINE void SetRequestedSize(size_t size) {
|
||||
requested_size_ = static_cast<uint32_t>(size);
|
||||
}
|
||||
ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
|
||||
#endif // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
|
||||
|
||||
private:
|
||||
// The common parts shared by Release() and ReleaseFromUnprotectedPtr().
|
||||
// Called after updating the ref counts, |count| is the new value of |count_|
|
||||
@ -286,6 +293,10 @@ class BASE_EXPORT PartitionRefCount {
|
||||
static constexpr uint32_t kCookieSalt = 0xc01dbeef;
|
||||
volatile uint32_t brp_cookie_;
|
||||
#endif
|
||||
|
||||
#if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
|
||||
uint32_t requested_size_;
|
||||
#endif
|
||||
};
|
||||
|
||||
ALWAYS_INLINE PartitionRefCount::PartitionRefCount()
|
||||
@ -297,7 +308,7 @@ ALWAYS_INLINE PartitionRefCount::PartitionRefCount()
|
||||
|
||||
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
|
||||
static_assert(base::kAlignment % alignof(PartitionRefCount) == 0,
|
||||
static_assert(kAlignment % alignof(PartitionRefCount) == 0,
|
||||
"kAlignment must be multiples of alignof(PartitionRefCount).");
|
||||
|
||||
// Allocate extra space for the reference count to satisfy the alignment
|
||||
@ -344,7 +355,7 @@ ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
|
||||
|
||||
// Allocate extra space for the reference count to satisfy the alignment
|
||||
// requirement.
|
||||
static constexpr size_t kInSlotRefCountBufferSize = base::kAlignment;
|
||||
static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
|
||||
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;
|
||||
|
||||
// This is for adjustment of pointers right past the allocation, which may point
|
||||
|
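The new accessors above stash the caller's originally requested size in spare space inside the ref-count object, behind a macro that is off by default. A minimal stand-in showing the same optional-field pattern (an illustrative class, not the real PartitionRefCount):

#include <cstddef>
#include <cstdint>

#define SKETCH_STORE_REQUESTED_SIZE  // Comment out to compile the field away.

class RefCountSketch {
 public:
  uint32_t count() const { return count_; }
#if defined(SKETCH_STORE_REQUESTED_SIZE)
  void SetRequestedSize(size_t size) {
    requested_size_ = static_cast<uint32_t>(size);
  }
  uint32_t requested_size() const { return requested_size_; }
#endif

 private:
  uint32_t count_ = 1;
#if defined(SKETCH_STORE_REQUESTED_SIZE)
  uint32_t requested_size_ = 0;  // Debug-only payload; costs 4 bytes when on.
#endif
};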
@@ -8,6 +8,7 @@

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
@@ -21,7 +22,6 @@
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
@@ -48,18 +48,17 @@ void RecordAllocOrFree(uintptr_t addr, size_t size) {
}  // namespace partition_alloc::internal
#endif  // BUILDFLAG(RECORD_ALLOC_INFO)

namespace base {
namespace partition_alloc {

#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)

namespace {

internal::PartitionLock g_root_enumerator_lock;
internal::Lock g_root_enumerator_lock;
}

template <bool thread_safe>
internal::PartitionLock& PartitionRoot<thread_safe>::GetEnumeratorLock() {
internal::Lock& PartitionRoot<thread_safe>::GetEnumeratorLock() {
  return g_root_enumerator_lock;
}

@@ -97,8 +96,7 @@ class PartitionRootEnumerator {
  }

  void Register(ThreadSafePartitionRoot* root) {
    internal::PartitionAutoLock guard(
        ThreadSafePartitionRoot::GetEnumeratorLock());
    internal::ScopedGuard guard(ThreadSafePartitionRoot::GetEnumeratorLock());
    root->next_root = partition_roots_;
    root->prev_root = nullptr;
    if (partition_roots_)
@@ -107,8 +105,7 @@ class PartitionRootEnumerator {
  }

  void Unregister(ThreadSafePartitionRoot* root) {
    internal::PartitionAutoLock guard(
        ThreadSafePartitionRoot::GetEnumeratorLock());
    internal::ScopedGuard guard(ThreadSafePartitionRoot::GetEnumeratorLock());
    ThreadSafePartitionRoot* prev = root->prev_root;
    ThreadSafePartitionRoot* next = root->next_root;
    if (prev) {
@@ -173,7 +170,7 @@ void BeforeForkInParent() NO_THREAD_SAFETY_ANALYSIS {
      LockRoot, false,
      internal::PartitionRootEnumerator::EnumerateOrder::kNormal);

  internal::ThreadCacheRegistry::GetLock().Acquire();
  ThreadCacheRegistry::GetLock().Acquire();
}

template <typename T>
@@ -193,7 +190,7 @@ void UnlockOrReinitRoot(PartitionRoot<internal::ThreadSafe>* root,

void ReleaseLocks(bool in_child) NO_THREAD_SAFETY_ANALYSIS {
  // In reverse order, even though there are no lock ordering dependencies.
  UnlockOrReinit(internal::ThreadCacheRegistry::GetLock(), in_child);
  UnlockOrReinit(ThreadCacheRegistry::GetLock(), in_child);
  internal::PartitionRootEnumerator::Instance().Enumerate(
      UnlockOrReinitRoot, in_child,
      internal::PartitionRootEnumerator::EnumerateOrder::kReverse);
@@ -217,8 +214,7 @@ void AfterForkInChild() {
  // If we don't reclaim this memory, it is lost forever. Note that this is only
  // really an issue if we fork() a multi-threaded process without calling
  // exec() right away, which is discouraged.
  internal::ThreadCacheRegistry::Instance()
      .ForcePurgeAllThreadAfterForkUnsafe();
  ThreadCacheRegistry::Instance().ForcePurgeAllThreadAfterForkUnsafe();
}
#endif  // defined(PA_HAS_ATFORK_HANDLER)

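The Before/AfterFork handlers touched above follow the standard pthread_atfork protocol: take every allocator lock before fork() so the child never inherits a lock frozen in the locked state, then release them in the parent and re-initialize them in the child. A minimal sketch of that protocol with a single POSIX mutex (the allocator's real handlers enumerate every root, as shown above):

#include <pthread.h>

static pthread_mutex_t g_alloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void BeforeFork() { pthread_mutex_lock(&g_alloc_lock); }
static void AfterForkParent() { pthread_mutex_unlock(&g_alloc_lock); }
static void AfterForkChild() {
  // Only the forking thread survives in the child; releasing the lock it
  // "holds" is the simplest safe re-initialization in this sketch.
  pthread_mutex_unlock(&g_alloc_lock);
}

static void InstallForkHandlers() {
  pthread_atfork(&BeforeFork, &AfterForkParent, &AfterForkChild);
}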
@@ -309,14 +305,16 @@ static size_t PartitionPurgeSlotSpan(
  constexpr size_t kMaxSlotCount =
      (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
      SystemPageSize();
#elif BUILDFLAG(IS_APPLE)
#elif BUILDFLAG(IS_APPLE) || (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
  // It's better for slot_usage to be stack-allocated and fixed-size, which
  // demands that its size be constexpr. On OS_APPLE, PartitionPageSize() is
  // always SystemPageSize() << 2, so regardless of what the run time page size
  // is, kMaxSlotCount can always be simplified to this expression.
  constexpr size_t kMaxSlotCount = 4 * kMaxPartitionPagesPerRegularSlotSpan;
  PA_CHECK(kMaxSlotCount ==
           (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
  // demands that its size be constexpr. On IS_APPLE and Linux on arm64,
  // PartitionPageSize() is always SystemPageSize() << 2, so regardless of
  // what the run time page size is, kMaxSlotCount can always be simplified
  // to this expression.
  constexpr size_t kMaxSlotCount =
      4 * internal::kMaxPartitionPagesPerRegularSlotSpan;
  PA_CHECK(kMaxSlotCount == (PartitionPageSize() *
                             internal::kMaxPartitionPagesPerRegularSlotSpan) /
                                SystemPageSize());
#endif
  PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
@@ -573,7 +571,7 @@ template <bool thread_safe>

  // Check whether this OOM is due to a lot of super pages that are allocated
  // but not committed, probably due to http://crbug.com/421387.
  if (uncommitted_size > kReasonableSizeOfUnusedPages) {
  if (uncommitted_size > internal::kReasonableSizeOfUnusedPages) {
    internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
  }

@@ -625,14 +623,39 @@ void PartitionRoot<thread_safe>::DecommitEmptySlotSpans() {
  PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
}

template <bool thread_safe>
void PartitionRoot<thread_safe>::DestructForTesting() {
  // We need to destruct the thread cache before we unreserve any of the super
  // pages below, which we currently are not doing. So, we should only call
  // this function on PartitionRoots without a thread cache.
  PA_CHECK(!with_thread_cache);
  auto pool_handle = ChoosePool();
  auto* curr = first_extent;
  while (curr != nullptr) {
    auto* next = curr->next;
    internal::AddressPoolManager::GetInstance()->UnreserveAndDecommit(
        pool_handle, reinterpret_cast<uintptr_t>(curr),
        internal::kSuperPageSize * curr->number_of_consecutive_super_pages);
    curr = next;
  }
}

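The new DestructForTesting() above walks the super-page extent list and returns each reservation to the address pool. Note that it reads curr->next before unreserving, since the extent metadata lives inside the memory being released. A standalone sketch of that traversal, with illustrative types rather than PartitionAlloc's:

#include <cstddef>

struct ExtentSketch {
  ExtentSketch* next;
  size_t number_of_consecutive_super_pages;
};

void ReleaseAllExtents(ExtentSketch* first, void (*unreserve)(ExtentSketch*)) {
  ExtentSketch* curr = first;
  while (curr != nullptr) {
    ExtentSketch* next = curr->next;  // Read before |curr|'s memory goes away.
    unreserve(curr);                  // |curr| is dangling after this call.
    curr = next;
  }
}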
template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
  {
#if BUILDFLAG(IS_APPLE)
    // Needed to statically bound page size, which is a runtime constant on
    // apple OSes.
    PA_CHECK((SystemPageSize() == (size_t{1} << 12)) ||
             (SystemPageSize() == (size_t{1} << 14)));
    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
             (internal::SystemPageSize() == (size_t{1} << 14)));
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
    // Check runtime pagesize. Though the code is currently the same, it is
    // not merged with the IS_APPLE case above as a 1 << 16 case needs to be
    // added here in the future, to allow 64 kiB pagesize. That is only
    // supported on Linux on arm64, not on IS_APPLE, but not yet present here
    // as the rest of the partition allocator does not currently support it.
    PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
             (internal::SystemPageSize() == (size_t{1} << 14)));
#endif

    ::partition_alloc::internal::ScopedGuard guard{lock_};
@@ -716,17 +739,18 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
    // Set up the actual usable buckets first.
    constexpr internal::BucketIndexLookup lookup{};
    size_t bucket_index = 0;
    while (lookup.bucket_sizes()[bucket_index] != kInvalidBucketSize) {
    while (lookup.bucket_sizes()[bucket_index] !=
           internal::kInvalidBucketSize) {
      buckets[bucket_index].Init(lookup.bucket_sizes()[bucket_index]);
      bucket_index++;
    }
    PA_DCHECK(bucket_index < kNumBuckets);
    PA_DCHECK(bucket_index < internal::kNumBuckets);

    // Remaining buckets are not usable, and not real.
    for (size_t index = bucket_index; index < kNumBuckets; index++) {
    for (size_t index = bucket_index; index < internal::kNumBuckets; index++) {
      // Cannot init with size 0 since it computes 1 / size, but make sure the
      // bucket is invalid.
      buckets[index].Init(kInvalidBucketSize);
      buckets[index].Init(internal::kInvalidBucketSize);
      buckets[index].active_slot_spans_head = nullptr;
      PA_DCHECK(!buckets[index].is_valid());
    }
@@ -735,12 +759,12 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
    // TLS in ThreadCache not supported on other OSes.
    with_thread_cache = false;
#else
    internal::ThreadCache::EnsureThreadSpecificDataInitialized();
    ThreadCache::EnsureThreadSpecificDataInitialized();
    with_thread_cache =
        (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);

    if (with_thread_cache)
      internal::ThreadCache::Init(this);
      ThreadCache::Init(this);
#endif  // !defined(PA_THREAD_CACHE_SUPPORTED)

#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
@@ -786,7 +810,7 @@ void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
  int before =
      thread_caches_being_constructed_.fetch_add(1, std::memory_order_acquire);
  PA_CHECK(before == 0);
  internal::ThreadCache::Init(this);
  ThreadCache::Init(this);
  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
  with_thread_cache = true;
#endif  // defined(PA_THREAD_CACHE_SUPPORTED)
@@ -817,15 +841,15 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
  // Make this check before comparing slot sizes, as even with equal or similar
  // slot sizes we can save a lot if the original allocation was heavily padded
  // for alignment.
  if ((new_reservation_size >> SystemPageShift()) * 5 <
      (current_reservation_size >> SystemPageShift()) * 4)
  if ((new_reservation_size >> internal::SystemPageShift()) * 5 <
      (current_reservation_size >> internal::SystemPageShift()) * 4)
    return false;

  // Note that the new size isn't a bucketed size; this function is called
  // whenever we're reallocating a direct mapped allocation, so calculate it
  // the way PartitionDirectMap() would.
  size_t new_slot_size = GetDirectMapSlotSize(raw_size);
  if (new_slot_size < kMinDirectMappedDownsize)
  if (new_slot_size < internal::kMinDirectMappedDownsize)
    return false;

  // Past this point, we decided we'll attempt to reallocate without relocating,
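A worked example of the 5-vs-4 check above: in-place realloc is refused when the new reservation would fall below 4/5 of the current one, because keeping more than 20% of now-unneeded pages reserved is too wasteful. Page counts here are illustrative:

#include <cstddef>

constexpr bool WouldReallocInPlace(size_t new_pages, size_t current_pages) {
  return !(new_pages * 5 < current_pages * 4);  // Mirrors the check above.
}
static_assert(WouldReallocInPlace(90, 100),
              "90% of the old size: shrink in place");
static_assert(!WouldReallocInPlace(50, 100),
              "50% of the old size: relocate and release the pages");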
@@ -841,11 +865,12 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
      current_reservation_size - extent->padding_for_alignment -
      PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
#if DCHECK_IS_ON()
  uintptr_t reservation_start = slot_start & kSuperPageBaseMask;
  uintptr_t reservation_start = slot_start & internal::kSuperPageBaseMask;
  PA_DCHECK(internal::IsReservationStart(reservation_start));
  PA_DCHECK(slot_start + available_reservation_size ==
            reservation_start + current_reservation_size -
                GetDirectMapMetadataAndGuardPagesSize() + PartitionPageSize());
                GetDirectMapMetadataAndGuardPagesSize() +
                internal::PartitionPageSize());
#endif

  if (new_slot_size == current_slot_size) {
@@ -871,7 +896,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(

#if DCHECK_IS_ON()
    memset(reinterpret_cast<void*>(slot_start + current_slot_size),
           kUninitializedByte, recommit_slot_size_growth);
           internal::kUninitializedByte, recommit_slot_size_growth);
#endif
  } else {
    // We can't perform the realloc in-place.
@@ -961,9 +986,10 @@ void* PartitionRoot<thread_safe>::ReallocWithFlags(int flags,
  bool no_hooks = flags & AllocFlags::kNoHooks;
  if (UNLIKELY(!ptr)) {
    return no_hooks
               ? AllocWithFlagsNoHooks(flags, new_size, PartitionPageSize())
               : AllocWithFlagsInternal(flags, new_size, PartitionPageSize(),
                                        type_name);
               ? AllocWithFlagsNoHooks(flags, new_size,
                                       internal::PartitionPageSize())
               : AllocWithFlagsInternal(
                     flags, new_size, internal::PartitionPageSize(), type_name);
  }

  if (UNLIKELY(!new_size)) {
@@ -971,7 +997,7 @@ void* PartitionRoot<thread_safe>::ReallocWithFlags(int flags,
    return nullptr;
  }

  if (new_size > MaxDirectMapped()) {
  if (new_size > internal::MaxDirectMapped()) {
    if (flags & AllocFlags::kReturnNull)
      return nullptr;
    internal::PartitionExcessiveAllocationSize(new_size);
@@ -1019,10 +1045,11 @@ void* PartitionRoot<thread_safe>::ReallocWithFlags(int flags,
  }

  // This realloc cannot be resized in-place. Sadness.
  void* ret = no_hooks
                  ? AllocWithFlagsNoHooks(flags, new_size, PartitionPageSize())
                  : AllocWithFlagsInternal(flags, new_size, PartitionPageSize(),
                                           type_name);
  void* ret =
      no_hooks ? AllocWithFlagsNoHooks(flags, new_size,
                                       internal::PartitionPageSize())
               : AllocWithFlagsInternal(
                     flags, new_size, internal::PartitionPageSize(), type_name);
  if (!ret) {
    if (flags & AllocFlags::kReturnNull)
      return nullptr;
@@ -1050,10 +1077,10 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
    DecommitEmptySlotSpans();
  if (flags & PurgeFlags::kDiscardUnusedSystemPages) {
    for (Bucket& bucket : buckets) {
      if (bucket.slot_size == kInvalidBucketSize)
      if (bucket.slot_size == internal::kInvalidBucketSize)
        continue;

      if (bucket.slot_size >= SystemPageSize())
      if (bucket.slot_size >= internal::SystemPageSize())
        internal::PartitionPurgeBucket(&bucket);
      else
        bucket.SortSlotSpanFreelists();
@@ -1082,7 +1109,7 @@ void PartitionRoot<thread_safe>::ShrinkEmptySlotSpansRing(size_t limit) {
    // are unused, if global_empty_slot_span_ring_size is smaller than
    // kMaxFreeableSpans. It's simpler, and does not cost anything, since all
    // the pointers are going to be nullptr.
    if (index == kMaxFreeableSpans)
    if (index == internal::kMaxFreeableSpans)
      index = 0;

    // Went around the whole ring, since this is locked,
@@ -1109,7 +1136,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
    direct_map_lengths =
        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  }
  PartitionBucketMemoryStats bucket_stats[kNumBuckets];
  PartitionBucketMemoryStats bucket_stats[internal::kNumBuckets];
  size_t num_direct_mapped_allocations = 0;
  PartitionMemoryStats stats = {0};

@@ -1140,7 +1167,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
#endif

  size_t direct_mapped_allocations_total_size = 0;
  for (size_t i = 0; i < kNumBuckets; ++i) {
  for (size_t i = 0; i < internal::kNumBuckets; ++i) {
    const Bucket* bucket = &bucket_at(i);
    // Don't report the pseudo buckets that the generic allocator sets up in
    // order to preserve a fast size->bucket map (see
@@ -1174,10 +1201,10 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,

  stats.has_thread_cache = with_thread_cache;
  if (stats.has_thread_cache) {
    internal::ThreadCacheRegistry::Instance().DumpStats(
    ThreadCacheRegistry::Instance().DumpStats(
        true, &stats.current_thread_cache_stats);
    internal::ThreadCacheRegistry::Instance().DumpStats(
        false, &stats.all_thread_caches_stats);
    ThreadCacheRegistry::Instance().DumpStats(false,
                                              &stats.all_thread_caches_stats);
  }
}

@@ -1209,7 +1236,7 @@ template <bool thread_safe>
void PartitionRoot<thread_safe>::DeleteForTesting(
    PartitionRoot* partition_root) {
  if (partition_root->with_thread_cache) {
    internal::ThreadCache::SwapForTesting(nullptr);
    ThreadCache::SwapForTesting(nullptr);
    partition_root->with_thread_cache = false;
  }

@@ -1227,10 +1254,10 @@ template <>
uintptr_t PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCacheAndAlloc(
    uint16_t bucket_index,
    size_t* slot_size) {
  auto* tcache = internal::ThreadCache::Get();
  auto* tcache = ThreadCache::Get();
  // See comment in `EnableThreadCacheIfSupported()` for why this is an acquire
  // load.
  if (internal::ThreadCache::IsTombstone(tcache) ||
  if (ThreadCache::IsTombstone(tcache) ||
      thread_caches_being_constructed_.load(std::memory_order_acquire)) {
    // Two cases:
    // 1. Thread is being terminated, don't try to use the thread cache, and
@@ -1259,7 +1286,7 @@ uintptr_t PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCacheAndAlloc(
  int before =
      thread_caches_being_constructed_.fetch_add(1, std::memory_order_relaxed);
  PA_CHECK(before < std::numeric_limits<int>::max());
  tcache = internal::ThreadCache::Create(this);
  tcache = ThreadCache::Create(this);
  thread_caches_being_constructed_.fetch_sub(1, std::memory_order_relaxed);

  // Cache is created empty, but at least this will trigger batch fill, which
@@ -1272,11 +1299,11 @@ template struct BASE_EXPORT PartitionRoot<internal::ThreadSafe>;

static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
                  offsetof(PartitionRoot<internal::ThreadSafe>, buckets) +
                      kNumBuckets *
                      internal::kNumBuckets *
                          sizeof(PartitionRoot<internal::ThreadSafe>::Bucket),
              "sentinel_bucket must be just after the regular buckets.");

static_assert(
    offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64,
    "The lock should not be on the same cacheline as the read-mostly flags");
}  // namespace base
}  // namespace partition_alloc
@@ -38,6 +38,7 @@
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
@@ -56,12 +57,13 @@
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/bits.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -70,7 +72,7 @@
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
  if (size > MaxDirectMapped()) {                     \
  if (size > partition_alloc::internal::MaxDirectMapped()) { \
    if (flags & AllocFlags::kReturnNull) {            \
      return nullptr;                                 \
    }                                                 \
@@ -96,7 +98,7 @@ void RecordAllocOrFree(uintptr_t addr, size_t size);
#endif  // BUILDFLAG(RECORD_ALLOC_INFO)
}  // namespace partition_alloc::internal

namespace base {
namespace partition_alloc {

namespace internal {
// Avoid including partition_address_space.h from this .h file, by moving the
@@ -256,7 +258,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    // The flags above are accessed for all (de)allocations, and are mostly
    // read-only. They should not share a cacheline with the data below, which
    // is only touched when the lock is taken.
    uint8_t one_cacheline[kPartitionCachelineSize];
    uint8_t one_cacheline[internal::kPartitionCachelineSize];
  };

  // Not used on the fastest path (thread cache allocations), but on the fast
@@ -264,7 +266,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
  static_assert(thread_safe, "Only the thread-safe root is supported.");
  ::partition_alloc::internal::Lock lock_;

  Bucket buckets[kNumBuckets] = {};
  Bucket buckets[internal::kNumBuckets] = {};
  Bucket sentinel_bucket{};

  // All fields below this comment are not accessed on the fast path.
@@ -321,11 +323,11 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
  SuperPageExtentEntry* current_extent = nullptr;
  SuperPageExtentEntry* first_extent = nullptr;
  DirectMapExtent* direct_map_list GUARDED_BY(lock_) = nullptr;
  SlotSpan* global_empty_slot_span_ring[kMaxFreeableSpans] GUARDED_BY(
  SlotSpan* global_empty_slot_span_ring[internal::kMaxFreeableSpans] GUARDED_BY(
      lock_) = {};
  int16_t global_empty_slot_span_ring_index GUARDED_BY(lock_) = 0;
  int16_t global_empty_slot_span_ring_size GUARDED_BY(lock_) =
      kDefaultEmptySlotSpanRingSize;
      internal::kDefaultEmptySlotSpanRingSize;

  // Integrity check = ~reinterpret_cast<uintptr_t>(this).
  uintptr_t inverted_self = 0;
@@ -333,12 +335,24 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {

  bool quarantine_always_for_testing = false;

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  partition_alloc::PartitionTag current_partition_tag = 0;
  // Points to the end of the committed tag bitmap region.
  uintptr_t next_tag_bitmap_page = 0;
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

  PartitionRoot()
      : quarantine_mode(QuarantineMode::kAlwaysDisabled),
        scan_mode(ScanMode::kDisabled) {}
  explicit PartitionRoot(PartitionOptions opts) { Init(opts); }
  ~PartitionRoot();

  // This will unreserve any space in the GigaCage that the PartitionRoot is
  // using. This is needed because many tests create and destroy many
  // PartitionRoots over the lifetime of a process, which can exhaust the
  // GigaCage and cause tests to fail.
  void DestructForTesting();

  // Public API
  //
  // Allocates out of the given bucket. Properly, this function should probably
@@ -483,7 +497,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
  // memory usage.
  void EnableLargeEmptySlotSpanRing() {
    ::partition_alloc::internal::ScopedGuard locker{lock_};
    global_empty_slot_span_ring_size = kMaxFreeableSpans;
    global_empty_slot_span_ring_size = internal::kMaxFreeableSpans;
  }

  void DumpStats(const char* partition_name,
@@ -527,8 +541,8 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    with_denser_bucket_distribution = false;
  }

  internal::ThreadCache* thread_cache_for_testing() const {
    return with_thread_cache ? internal::ThreadCache::Get() : nullptr;
  ThreadCache* thread_cache_for_testing() const {
    return with_thread_cache ? ThreadCache::Get() : nullptr;
  }
  size_t get_total_size_of_committed_pages() const {
    return total_size_of_committed_pages.load(std::memory_order_relaxed);
@@ -577,7 +591,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    // If quarantine is enabled and the tag overflows, move the containing slot
    // to quarantine, to prevent the attacker from exploiting a pointer that has
    // an old tag.
    return HasOverflowTag(object);
    return internal::HasOverflowTag(object);
#else
    return true;
#endif
@@ -603,7 +617,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    // to match granularity of the BRP pool bitmap. For consistency, we'll use
    // a partition page everywhere, which is cheap as it's uncommitted address
    // space anyway.
    return 2 * PartitionPageSize();
    return 2 * internal::PartitionPageSize();
  }

  static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
@@ -611,8 +625,9 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    // Caller must check that the size is not above the MaxDirectMapped()
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    PA_DCHECK(raw_size <= MaxDirectMapped());
    return bits::AlignUp(raw_size, SystemPageSize());
    PA_DCHECK(raw_size <= internal::MaxDirectMapped());
    return partition_alloc::internal::base::bits::AlignUp(
        raw_size, internal::SystemPageSize());
  }

  static ALWAYS_INLINE size_t
@@ -620,10 +635,10 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    // Caller must check that the size is not above the MaxDirectMapped()
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    PA_DCHECK(padded_raw_size <= MaxDirectMapped());
    return bits::AlignUp(
    PA_DCHECK(padded_raw_size <= internal::MaxDirectMapped());
    return partition_alloc::internal::base::bits::AlignUp(
        padded_raw_size + GetDirectMapMetadataAndGuardPagesSize(),
        DirectMapAllocationGranularity());
        internal::DirectMapAllocationGranularity());
  }

  ALWAYS_INLINE size_t AdjustSize0IfNeeded(size_t size) const {
@@ -716,6 +731,17 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
    max_empty_slot_spans_dirty_bytes_shift = 0;
  }

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
    // TODO(crbug.com/1298696): performance is not an issue. We can use
    // random tags in lieu of sequential ones.
    auto tag = ++current_partition_tag;
    tag += !tag;  // Avoid 0.
    current_partition_tag = tag;
    return tag;
  }
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

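The increment in GetNewPartitionTag() above never hands out tag 0: when ++tag wraps around to 0, !tag evaluates to 1 and the tag becomes 1, with no branch. A standalone check of that trick (the tag width here is assumed for illustration):

#include <cstdint>

using PartitionTagSketch = uint8_t;  // Width assumed for illustration.

constexpr PartitionTagSketch NextTag(PartitionTagSketch tag) {
  ++tag;
  tag += !tag;  // Only fires on wraparound: 0 becomes 1.
  return tag;
}
static_assert(NextTag(41) == 42, "normal increment is unaffected");
static_assert(NextTag(255) == 1, "wraparound skips the reserved value 0");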
 private:
  // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
  // `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
@@ -727,7 +753,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
  // this.
  ALWAYS_INLINE const Bucket& NO_SANITIZE("undefined")
      bucket_at(size_t i) const {
    PA_DCHECK(i <= kNumBuckets);
    PA_DCHECK(i <= internal::kNumBuckets);
    return buckets[i];
  }

@@ -787,7 +813,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
      size_t* slot_size);

#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  static internal::PartitionLock& GetEnumeratorLock();
  static internal::Lock& GetEnumeratorLock();

  PartitionRoot* GUARDED_BY(GetEnumeratorLock()) next_root = nullptr;
  PartitionRoot* GUARDED_BY(GetEnumeratorLock()) prev_root = nullptr;
@@ -795,7 +821,7 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
  friend class internal::PartitionRootEnumerator;
#endif  // defined(PA_USE_PARTITION_ROOT_ENUMERATOR)

  friend class internal::ThreadCache;
  friend class ThreadCache;
};

namespace internal {
@@ -979,8 +1005,9 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
                                            size_t slot_span_alignment,
                                            size_t* usable_size,
                                            bool* is_already_zeroed) {
  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
            bits::IsPowerOfTwo(slot_span_alignment));
  PA_DCHECK(
      (slot_span_alignment >= internal::PartitionPageSize()) &&
      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
  SlotSpan* slot_span = bucket->active_slot_spans_head;
  // There always must be a slot span on the active list (could be a sentinel).
  PA_DCHECK(slot_span);
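The alignment DCHECK above uses the classic power-of-two test now spelled via the vendored bits helper. A standalone sketch of that test (a power of two has exactly one set bit, so x & (x - 1) clears it to zero):

#include <cstddef>

constexpr bool IsPowerOfTwoSketch(size_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}
static_assert(IsPowerOfTwoSketch(16384), "partition-page-sized alignments pass");
static_assert(!IsPowerOfTwoSketch(24576), "non-powers of two are rejected");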
@ -994,7 +1021,8 @@ PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
|
||||
// first active slot span. However, fall back to the slow path if a
|
||||
// higher-order alignment is requested, because an inner slot of an existing
|
||||
// slot span is unlikely to satisfy it.
|
||||
if (LIKELY(slot_span_alignment <= PartitionPageSize() && slot_start)) {
|
||||
if (LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
|
||||
slot_start)) {
|
||||
*is_already_zeroed = false;
|
||||
// This is a fast path, so avoid calling GetUsableSize() on Release builds
|
||||
// as it is more costly. Copy its small bucket path instead.
|
||||
@ -1096,7 +1124,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
|
||||
|
||||
#if defined(PA_HAS_MEMORY_TAGGING)
|
||||
const size_t slot_size = slot_span->bucket->slot_size;
|
||||
if (LIKELY(slot_size <= kMaxMemoryTaggingSize)) {
|
||||
if (LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
|
||||
// TODO(bartekn): |slot_start| shouldn't have MTE tag.
|
||||
slot_start = ::partition_alloc::internal::TagMemoryRangeIncrement(
|
||||
slot_start, slot_size);
|
||||
@ -1119,6 +1147,15 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
|
||||
PA_PREFETCH(slot_span);
|
||||
#endif // defined(PA_HAS_MEMORY_TAGGING)
|
||||
|
||||
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
|
||||
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
|
||||
size_t slot_size_less_extras =
|
||||
root->AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
|
||||
partition_alloc::internal::PartitionTagIncrementValue(
|
||||
object, slot_size_less_extras);
|
||||
}
|
||||
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
|
||||
|
||||
// TODO(bikineev): Change the condition to LIKELY once PCScan is enabled by
|
||||
// default.
|
||||
if (UNLIKELY(root->ShouldQuarantine(object))) {
|
||||
@@ -1204,7 +1241,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   // immediately. Otherwise, defer the operation and zap the memory to turn
   // potential use-after-free issues into unexploitable crashes.
   if (UNLIKELY(!ref_count->IsAliveWithNoKnownRefs()))
-    internal::SecureMemset(object, kQuarantinedByte,
+    internal::SecureMemset(object, internal::kQuarantinedByte,
                            slot_span->GetUsableSize(this));
 
   if (UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
@@ -1219,7 +1256,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
 
   // memset() can be really expensive.
 #if EXPENSIVE_DCHECKS_ARE_ON()
-  memset(SlotStartAddr2Ptr(slot_start), kFreedByte,
+  memset(SlotStartAddr2Ptr(slot_start), internal::kFreedByte,
          slot_span->GetUtilizedSlotSize()
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
              - sizeof(internal::PartitionRefCount)
@@ -1324,8 +1361,8 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
   // allocations are uncommon.
   if (LIKELY(with_thread_cache && !IsDirectMappedBucket(slot_span->bucket))) {
     size_t bucket_index = slot_span->bucket - this->buckets;
-    auto* thread_cache = internal::ThreadCache::Get();
-    if (LIKELY(internal::ThreadCache::IsValid(thread_cache) &&
+    auto* thread_cache = ThreadCache::Get();
+    if (LIKELY(ThreadCache::IsValid(thread_cache) &&
                thread_cache->MaybePutInCache(slot_start, bucket_index))) {
       return;
     }
@@ -1358,7 +1395,7 @@ template <bool thread_safe>
 ALWAYS_INLINE PartitionRoot<thread_safe>*
 PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
   auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
-      reinterpret_cast<uintptr_t>(slot_span) & SystemPageBaseMask());
+      reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
   return extent_entry->root;
 }
 
@@ -1376,7 +1413,7 @@ PartitionRoot<thread_safe>::FromFirstSuperPage(uintptr_t super_page) {
 template <bool thread_safe>
 ALWAYS_INLINE PartitionRoot<thread_safe>*
 PartitionRoot<thread_safe>::FromAddrInFirstSuperpage(uintptr_t address) {
-  uintptr_t super_page = address & kSuperPageBaseMask;
+  uintptr_t super_page = address & internal::kSuperPageBaseMask;
   PA_DCHECK(internal::IsReservationStart(super_page));
   return FromFirstSuperPage(super_page);
 }
@@ -1556,8 +1593,8 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlags(
     int flags,
     size_t requested_size,
     const char* type_name) {
-  return AllocWithFlagsInternal(flags, requested_size, PartitionPageSize(),
-                                type_name);
+  return AllocWithFlagsInternal(flags, requested_size,
+                                internal::PartitionPageSize(), type_name);
 }
 
 template <bool thread_safe>
@@ -1566,8 +1603,9 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsInternal(
     size_t requested_size,
     size_t slot_span_alignment,
     const char* type_name) {
-  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
-            bits::IsPowerOfTwo(slot_span_alignment));
+  PA_DCHECK(
+      (slot_span_alignment >= internal::PartitionPageSize()) &&
+      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
 
   PA_DCHECK(flags < AllocFlags::kLastFlag << 1);
   PA_DCHECK((flags & AllocFlags::kNoHooks) == 0);  // Internal only.
@@ -1608,8 +1646,9 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
     int flags,
     size_t requested_size,
     size_t slot_span_alignment) {
-  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
-            bits::IsPowerOfTwo(slot_span_alignment));
+  PA_DCHECK(
+      (slot_span_alignment >= internal::PartitionPageSize()) &&
+      partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
 
   // The thread cache is added "in the middle" of the main allocator, that is:
   // - After all the cookie/ref-count management
@@ -1648,11 +1687,12 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
   // thread cache will not be able to satisfy it.
   //
   // LIKELY: performance-sensitive partitions use the thread cache.
-  if (LIKELY(with_thread_cache && slot_span_alignment <= PartitionPageSize())) {
-    auto* tcache = internal::ThreadCache::Get();
+  if (LIKELY(with_thread_cache &&
+             slot_span_alignment <= internal::PartitionPageSize())) {
+    auto* tcache = ThreadCache::Get();
     // LIKELY: Typically always true, except for the very first allocation of
     // this thread.
-    if (LIKELY(internal::ThreadCache::IsValid(tcache))) {
+    if (LIKELY(ThreadCache::IsValid(tcache))) {
       slot_start = tcache->GetFromCache(bucket_index, &slot_size);
     } else {
       slot_start = MaybeInitThreadCacheAndAlloc(bucket_index, &slot_size);
@@ -1754,7 +1794,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
   if (LIKELY(!zero_fill)) {
     // memset() can be really expensive.
 #if EXPENSIVE_DCHECKS_ARE_ON()
-    memset(object, kUninitializedByte, usable_size);
+    memset(object, internal::kUninitializedByte, usable_size);
 #endif
   } else if (!is_already_zeroed) {
     memset(object, 0, usable_size);
@@ -1764,8 +1804,13 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
   // TODO(keishi): Add LIKELY when brp is fully enabled as |brp_enabled| will be
   // false only for the aligned partition.
   if (brp_enabled()) {
-    new (internal::PartitionRefCountPointer(slot_start))
+    auto* ref_count = new (internal::PartitionRefCountPointer(slot_start))
         internal::PartitionRefCount();
+#if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
+    ref_count->SetRequestedSize(requested_size);
+#else
+    (void)ref_count;
+#endif
   }
 #endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
 
@@ -1828,13 +1873,13 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
   PA_DCHECK(allow_aligned_alloc);
   PA_DCHECK(!extras_offset);
   // This is mandated by |posix_memalign()|, so should never fire.
-  PA_CHECK(base::bits::IsPowerOfTwo(alignment));
+  PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
   // Catch unsupported alignment requests early.
-  PA_CHECK(alignment <= kMaxSupportedAlignment);
+  PA_CHECK(alignment <= internal::kMaxSupportedAlignment);
   size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
 
   size_t adjusted_size = requested_size;
-  if (alignment <= PartitionPageSize()) {
+  if (alignment <= internal::PartitionPageSize()) {
     // Handle cases such as size = 16, alignment = 64.
     // Wastes memory when a large alignment is requested with a small size, but
     // this is hard to avoid, and should not be too common.
@@ -1844,11 +1889,13 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
     // PartitionAlloc only guarantees alignment for power-of-two sized
     // allocations. To make sure this applies here, round up the allocation
     // size.
-    raw_size = static_cast<size_t>(1)
-               << (sizeof(size_t) * 8 -
-                   base::bits::CountLeadingZeroBits(raw_size - 1));
+    raw_size =
+        static_cast<size_t>(1)
+        << (sizeof(size_t) * 8 -
+            partition_alloc::internal::base::bits::CountLeadingZeroBits(
+                raw_size - 1));
   }
-  PA_DCHECK(base::bits::IsPowerOfTwo(raw_size));
+  PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
   // Adjust back, because AllocWithFlagsNoHooks/Alloc will adjust it again.
   adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
 
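The rounding in the hunk above is the standard count-leading-zeros idiom for bumping a size up to the next power of two. A self-contained sketch, assuming a 64-bit word and n >= 2 (__builtin_clzll(0) is undefined):

#include <cstdint>

// 1 << (64 - clz(n - 1)) keeps only the bit one position above the highest
// set bit of n - 1, i.e. the smallest power of two >= n. Exact powers of two
// map to themselves: n = 64 gives clz(63) = 58 and 1 << 6 = 64.
inline uint64_t RoundUpToPowerOfTwo(uint64_t n) {
  return uint64_t{1} << (64 - __builtin_clzll(n - 1));
}
// Example: RoundUpToPowerOfTwo(48) == 64, so a 48-byte request with 64-byte
// alignment gets padded until the power-of-two bucket guarantees the alignment.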
@@ -1867,7 +1914,8 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
 
   // Slot spans are naturally aligned on partition page size, but make sure you
   // don't pass anything less, because it'll mess up callee's calculations.
-  size_t slot_span_alignment = std::max(alignment, PartitionPageSize());
+  size_t slot_span_alignment =
+      std::max(alignment, internal::PartitionPageSize());
   bool no_hooks = flags & AllocFlags::kNoHooks;
   void* object =
       no_hooks
@@ -1921,11 +1969,11 @@ PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
   auto& bucket =
       bucket_at(SizeToBucketIndex(size, with_denser_bucket_distribution));
   PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
-  PA_DCHECK(!(bucket.slot_size % kSmallestBucket));
+  PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
 
   if (LIKELY(!bucket.is_direct_mapped())) {
     size = bucket.slot_size;
-  } else if (size > MaxDirectMapped()) {
+  } else if (size > internal::MaxDirectMapped()) {
     // Too large to allocate => return the size unchanged.
   } else {
     size = GetDirectMapSlotSize(size);
@@ -1938,26 +1986,32 @@ PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
 using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
 
 static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
-                  kPartitionCachelineSize,
+                  internal::kPartitionCachelineSize,
               "Padding is incorrect");
 
-}  // namespace base
+}  // namespace partition_alloc
 
-namespace partition_alloc {
+namespace base {
 
 // TODO(https://crbug.com/1288247): Remove these 'using' declarations once
 // the migration to the new namespaces gets done.
-using ::base::PartitionOptions;
-using ::base::PurgeFlags;
+using ::partition_alloc::PartitionOptions;
+using ::partition_alloc::PurgeFlags;
+using ::partition_alloc::ThreadSafePartitionRoot;
 
 namespace internal {
 
 // TODO(https://crbug.com/1288247): Remove these 'using' declarations once
 // the migration to the new namespaces gets done.
-using ::base::internal::ScopedSyscallTimer;
+using ::partition_alloc::internal::ScopedSyscallTimer;
 
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
+using ::partition_alloc::internal::PartitionAllocFreeForRefCounting;
+using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
+using ::partition_alloc::internal::PartitionAllocIsValidPtrDelta;
 #endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
 
 }  // namespace internal
 
-}  // namespace partition_alloc
+}  // namespace base
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
src/base/allocator/partition_allocator/partition_tag.h (new file, 138 lines)
@@ -0,0 +1,138 @@
+// Copyright (c) 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
+
+// This file defines types and functions for `MTECheckedPtr<T>` (cf.
+// `tagging.h`, which deals with real ARM MTE).
+
+#include <string.h>
+
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
+#include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
+#include "base/allocator/partition_allocator/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/tagging.h"
+#include "build/build_config.h"
+
+namespace partition_alloc {
+
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+// Use 8 bits for the partition tag.
+// TODO(tasak): add a description about the partition tag.
+using PartitionTag = uint8_t;
+
+static_assert(
+    sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
+    "sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");
+
+ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
+  // TODO(crbug.com/1307514): Add direct map support. For now, just assume
+  // that direct maps don't have tags.
+  PA_DCHECK(internal::IsManagedByNormalBuckets(addr));
+
+  uintptr_t bitmap_base =
+      internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
+  const size_t bitmap_end_offset =
+      internal::PartitionPageSize() + internal::ReservedTagBitmapSize();
+  PA_DCHECK((addr & internal::kSuperPageOffsetMask) >= bitmap_end_offset);
+  uintptr_t offset_in_super_page =
+      (addr & internal::kSuperPageOffsetMask) - bitmap_end_offset;
+  size_t offset_in_bitmap =
+      offset_in_super_page >> internal::tag_bitmap::kBytesPerPartitionTagShift
+                           << internal::tag_bitmap::kPartitionTagSizeShift;
+  return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
+}
+
+ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
+  return PartitionTagPointer(
+      internal::UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
+}
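The shift pair in PartitionTagPointer() is plain scaling arithmetic. With the constants this diff introduces in partition_tag_bitmap.h (kBytesPerPartitionTagShift == 4, kPartitionTagSizeShift == 0), it reads as follows; the function below is only a spelled-out restatement, not part of the diff:

#include <cstdint>

// One 1-byte tag covers each 16-byte granule of the slot span, so the byte at
// offset 0x1234 past the bitmap maps to tag index 0x1234 >> 4 == 0x123; the
// second shift (by 0 here) would rescale the index if tags were wider.
inline uintptr_t TagOffsetInBitmap(uintptr_t offset_in_super_page) {
  return offset_in_super_page >> 4  // kBytesPerPartitionTagShift
                              << 0;  // kPartitionTagSizeShift
}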
+
+namespace internal {
+
+ALWAYS_INLINE void PartitionTagSetValue(uintptr_t addr,
+                                        size_t size,
+                                        PartitionTag value) {
+  PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
+  size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
+  PartitionTag* tag_ptr = PartitionTagPointer(addr);
+  if (sizeof(PartitionTag) == 1) {
+    memset(tag_ptr, value, tag_count);
+  } else {
+    while (tag_count-- > 0)
+      *tag_ptr++ = value;
+  }
+}
+
+ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
+                                        size_t size,
+                                        PartitionTag value) {
+  PartitionTagSetValue(reinterpret_cast<uintptr_t>(ptr), size, value);
+}
+
+ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
+  return *PartitionTagPointer(ptr);
+}
+
+ALWAYS_INLINE void PartitionTagClearValue(void* ptr, size_t size) {
+  size_t tag_region_size = size >> tag_bitmap::kBytesPerPartitionTagShift
+                                << tag_bitmap::kPartitionTagSizeShift;
+  PA_DCHECK(!memchr(PartitionTagPointer(ptr), 0, tag_region_size));
+  memset(PartitionTagPointer(ptr), 0, tag_region_size);
+}
+
+ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {
+  PartitionTag tag = PartitionTagGetValue(ptr);
+  PartitionTag new_tag = tag;
+  ++new_tag;
+  new_tag += !new_tag;  // Avoid 0.
+#if DCHECK_IS_ON()
+  // This verifies that tags for the entire slot have the same value and that
+  // |size| doesn't exceed the slot size.
+  size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
+  PartitionTag* tag_ptr = PartitionTagPointer(ptr);
+  while (tag_count-- > 0) {
+    PA_DCHECK(tag == *tag_ptr);
+    tag_ptr++;
+  }
+#endif
+  PartitionTagSetValue(ptr, size, new_tag);
+}
+
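`new_tag += !new_tag;` above is a branchless way to skip the reserved zero tag on wrap-around. In isolation, assuming the same uint8_t tag type:

#include <cstdint>

inline uint8_t NextNonZeroTag(uint8_t tag) {
  uint8_t next = tag + 1;  // 0xFF wraps to 0x00 here...
  next += !next;           // ...and !next == 1 only in that case, giving 0x01.
  return next;
}
// NextNonZeroTag(0x41) == 0x42, NextNonZeroTag(0xFF) == 0x01; zero stays
// reserved for cleared tag regions (see PartitionTagClearValue above).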
+}  // namespace internal
+
+#else  // No-op versions
+
+using PartitionTag = uint8_t;
+
+ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
+  PA_NOTREACHED();
+  return nullptr;
+}
+
+namespace internal {
+
+ALWAYS_INLINE void PartitionTagSetValue(void*, size_t, PartitionTag) {}
+
+ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
+  return 0;
+}
+
+ALWAYS_INLINE void PartitionTagClearValue(void* ptr, size_t) {}
+
+ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {}
+
+}  // namespace internal
+
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+}  // namespace partition_alloc
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
src/base/allocator/partition_allocator/partition_tag_bitmap.h (new file, 145 lines)
@@ -0,0 +1,145 @@
+// Copyright (c) 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
+
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+
+namespace partition_alloc::internal {
+
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+namespace tag_bitmap {
+// kPartitionTagSize should be equal to sizeof(PartitionTag).
+// PartitionTag is defined in partition_tag.h and static_assert there
+// checks the condition.
+static constexpr size_t kPartitionTagSizeShift = 0;
+static constexpr size_t kPartitionTagSize = 1U << kPartitionTagSizeShift;
+
+static constexpr size_t kBytesPerPartitionTagShift = 4;
+// One partition tag is assigned per |kBytesPerPartitionTag| bytes in the slot
+// spans.
+//  +-----------+ 0
+//  |           |  ====> 1 partition tag
+//  +-----------+ kBytesPerPartitionTag
+//  |           |  ====> 1 partition tag
+//  +-----------+ 2*kBytesPerPartitionTag
+//  ...
+//  +-----------+ slot_size
+static constexpr size_t kBytesPerPartitionTag = 1U
+                                                << kBytesPerPartitionTagShift;
+static_assert(
+    kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
+    "MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
+
+static constexpr size_t kBytesPerPartitionTagRatio =
+    kBytesPerPartitionTag / kPartitionTagSize;
+
+static_assert(kBytesPerPartitionTag > 0,
+              "kBytesPerPartitionTag should be larger than 0");
+static_assert(
+    kBytesPerPartitionTag % kPartitionTagSize == 0,
+    "kBytesPerPartitionTag should be multiples of sizeof(PartitionTag).");
+
+constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
+  return (size + unit_size - 1) / unit_size;
+}
+
+}  // namespace tag_bitmap
+
+// kTagBitmapSize is calculated in the following way:
+// (1) kSuperPageSize - 2 * PartitionPageSize() = kTagBitmapSize +
+//       SlotSpanSize()
+// (2) kTagBitmapSize >= SlotSpanSize() / kBytesPerPartitionTag *
+//       sizeof(PartitionTag)
+//--
+// (1)' SlotSpanSize() = kSuperPageSize - 2 * PartitionPageSize() -
+//        kTagBitmapSize
+// (2)' SlotSpanSize() <= kTagBitmapSize * Y
+// (3)' Y = kBytesPerPartitionTag / sizeof(PartitionTag) =
+//        kBytesPerPartitionTagRatio
+//
+//   kTagBitmapSize * Y >= kSuperPageSize - 2 * PartitionPageSize() -
+//       kTagBitmapSize
+//   (1 + Y) * kTagBimapSize >= kSuperPageSize - 2 * PartitionPageSize()
+// Finally,
+//   kTagBitmapSize >= (kSuperPageSize - 2 * PartitionPageSize()) / (1 + Y)
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+NumPartitionPagesPerTagBitmap() {
+  return tag_bitmap::CeilCountOfUnits(
+      kSuperPageSize / PartitionPageSize() - 2,
+      tag_bitmap::kBytesPerPartitionTagRatio + 1);
+}
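Plugging in typical 64-bit values (kSuperPageSize = 2 MiB and PartitionPageSize() = 16 KiB are assumptions about the usual configuration, defined elsewhere; Y = kBytesPerPartitionTagRatio = 16 comes from the constants above), the formula works out to:

#include <cstddef>

// ceil((2 MiB / 16 KiB - 2) / (16 + 1)) = ceil(126 / 17) = 8, i.e. 8 of the
// 128 partition pages in a super page get reserved for the tag bitmap.
constexpr size_t kPagesPerSuperPage = 2 * 1024 * 1024 / (16 * 1024);  // 128
static_assert((kPagesPerSuperPage - 2 + 17 - 1) / 17 == 8,
              "ceil(126 / 17) == 8");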
+
+// To make guard pages between the tag bitmap and the slot span, calculate the
+// number of SystemPages of TagBitmap. If kNumSystemPagesPerTagBitmap *
+// SystemPageSize() < kTagBitmapSize, guard pages will be created. (c.f. no
+// guard pages if sizeof(PartitionTag) == 2.)
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+NumSystemPagesPerTagBitmap() {
+  return tag_bitmap::CeilCountOfUnits(
+      kSuperPageSize / SystemPageSize() -
+          2 * PartitionPageSize() / SystemPageSize(),
+      tag_bitmap::kBytesPerPartitionTagRatio + 1);
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+ActualTagBitmapSize() {
+  return NumSystemPagesPerTagBitmap() * SystemPageSize();
+}
+
+// PartitionPageSize-aligned tag bitmap size.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+ReservedTagBitmapSize() {
+  return PartitionPageSize() * NumPartitionPagesPerTagBitmap();
+}
+
+#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
+static_assert(ActualTagBitmapSize() <= ReservedTagBitmapSize(),
+              "kActualTagBitmapSize should be smaller than or equal to "
+              "kReservedTagBitmapSize.");
+static_assert(ReservedTagBitmapSize() - ActualTagBitmapSize() <
+                  PartitionPageSize(),
+              "Unused space in the tag bitmap should be smaller than "
+              "PartitionPageSize()");
+
+// The region available for slot spans is the reminder of the super page, after
+// taking away the first and last partition page (for metadata and guard pages)
+// and partition pages reserved for the tag bitmap.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+SlotSpansSize() {
+  return kSuperPageSize - 2 * PartitionPageSize() - ReservedTagBitmapSize();
+}
+
+static_assert(ActualTagBitmapSize() * tag_bitmap::kBytesPerPartitionTagRatio >=
+                  SlotSpansSize(),
+              "bitmap is large enough to cover slot spans");
+static_assert((ActualTagBitmapSize() - PartitionPageSize()) *
+                      tag_bitmap::kBytesPerPartitionTagRatio <
+                  SlotSpansSize(),
+              "any smaller bitmap wouldn't suffice to cover slots spans");
+#endif  // PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
+
+#else
+
+constexpr ALWAYS_INLINE size_t NumPartitionPagesPerTagBitmap() {
+  return 0;
+}
+
+constexpr ALWAYS_INLINE size_t ActualTagBitmapSize() {
+  return 0;
+}
+
+constexpr ALWAYS_INLINE size_t ReservedTagBitmapSize() {
+  return 0;
+}
+
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+}  // namespace partition_alloc::internal
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
src/base/allocator/partition_allocator/partition_tls.h
@@ -6,6 +6,7 @@
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TLS_H_
 
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "build/build_config.h"
 
@@ -16,6 +16,7 @@
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/tagging.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "build/build_config.h"
 
src/base/allocator/partition_allocator/starscan/logging.h
@@ -8,8 +8,7 @@
 #include "base/allocator/partition_allocator/allocation_guard.h"
 #include "base/logging.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // Logging requires allocations. This logger allows reentrant allocations to
 // happen within the allocator context.
@@ -19,8 +18,8 @@ struct LoggerWithAllowedAllocations : ScopedAllowAllocations,
 };
 
 #define PA_PCSCAN_VLOG_STREAM(verbose_level)                          \
-  ::base::internal::LoggerWithAllowedAllocations(__FILE__, __LINE__,  \
-                                                 -(verbose_level))    \
+  ::partition_alloc::internal::LoggerWithAllowedAllocations(          \
+      __FILE__, __LINE__, -(verbose_level))                           \
       .stream()
 
 // Logging macro that is meant to be used inside *Scan. Generally, reentrancy
@@ -34,7 +33,6 @@ struct LoggerWithAllowedAllocations : ScopedAllowAllocations,
 #define PA_PCSCAN_VLOG(verbose_level) \
   LAZY_STREAM(PA_PCSCAN_VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_
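PA_PCSCAN_VLOG builds on the classic LAZY_STREAM trick, which the following sketch reduces to its core. This is a hypothetical macro for illustration, not the Chromium one: the point is that the stream expression, including any allocation the logger would perform, is evaluated only when the condition holds.

#include <iostream>

struct Voidify {
  // operator& binds more loosely than operator<<, so the whole stream chain
  // collapses into a void expression for the ternary below.
  void operator&(std::ostream&) {}
};

#define MY_LAZY_STREAM(stream, condition) \
  !(condition) ? (void)0 : Voidify() & (stream)

// Usage: nothing on the right of the ternary runs when verbose logging is
// off, which is what makes a reentrant, allocating logger affordable.
// MY_LAZY_STREAM(std::cerr << "pcscan: " << 42 << " bytes\n", enabled);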
src/base/allocator/partition_allocator/starscan/metadata_allocator.cc
@@ -8,8 +8,7 @@
 
 #include "base/no_destructor.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 namespace {
 constexpr PartitionOptions kConfig{
@@ -37,5 +36,4 @@ void ReinitPCScanMetadataAllocatorForTesting() {
   PCScanMetadataAllocator().Init(kConfig);
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
src/base/allocator/partition_allocator/starscan/metadata_allocator.h
@@ -10,8 +10,7 @@
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_root.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 ThreadSafePartitionRoot& PCScanMetadataAllocator();
 void ReinitPCScanMetadataAllocatorForTesting();
@@ -79,7 +78,18 @@ struct PCScanMetadataDeleter final {
   }
 };
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove these when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::AllocatedOnPCScanMetadataPartition;
+using ::partition_alloc::internal::MakePCScanMetadata;
+using ::partition_alloc::internal::MetadataAllocator;
+using ::partition_alloc::internal::PCScanMetadataAllocator;
+using ::partition_alloc::internal::PCScanMetadataDeleter;
+using ::partition_alloc::internal::ReinitPCScanMetadataAllocatorForTesting;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_METADATA_ALLOCATOR_H_
src/base/allocator/partition_allocator/starscan/pcscan.cc
@@ -6,8 +6,7 @@
 
 #include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 void PCScan::Initialize(InitConfig config) {
   PCScanInternal::Instance().Initialize(config);
@@ -54,7 +53,7 @@ void PCScan::PerformScanIfNeeded(InvocationMode invocation_mode) {
 
 void PCScan::PerformDelayedScan(int64_t delay_in_microseconds) {
   PCScanInternal::Instance().PerformDelayedScan(
-      Microseconds(delay_in_microseconds));
+      base::Microseconds(delay_in_microseconds));
 }
 
 void PCScan::JoinScan() {
@@ -104,11 +103,10 @@ void PCScan::FinishScanForTesting() {
   PCScanInternal::Instance().FinishScanForTesting();  // IN-TEST
 }
 
-void PCScan::RegisterStatsReporter(StatsReporter* reporter) {
+void PCScan::RegisterStatsReporter(partition_alloc::StatsReporter* reporter) {
   PCScanInternal::Instance().RegisterStatsReporter(reporter);
 }
 
 PCScan PCScan::instance_ CONSTINIT;
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
src/base/allocator/partition_allocator/starscan/pcscan.h
@@ -20,7 +20,7 @@
 // We currently disable it to improve the runtime.
 #define PA_STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED 0
 
-namespace base {
+namespace partition_alloc {
 
 class StatsReporter;
 
@@ -141,7 +141,7 @@ class BASE_EXPORT PCScan final {
   inline static PCScanScheduler& scheduler();
 
   // Registers reporting class.
-  static void RegisterStatsReporter(StatsReporter* reporter);
+  static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
 
  private:
   class PCScanThread;
@@ -278,6 +278,12 @@ inline PCScanScheduler& PCScan::scheduler() {
 }
 
 }  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc
+
+// TODO(crbug.com/1288247): Remove this when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::PCScan;
+
+}  // namespace base::internal
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_H_
src/base/allocator/partition_allocator/starscan/pcscan_internal.cc
@@ -20,6 +20,7 @@
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
 #include "base/allocator/partition_allocator/allocation_guard.h"
+#include "base/allocator/partition_allocator/base/bits.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/page_allocator_constants.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
@@ -39,7 +40,6 @@
 #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
 #include "base/allocator/partition_allocator/tagging.h"
 #include "base/allocator/partition_allocator/thread_cache.h"
-#include "base/bits.h"
 #include "base/compiler_specific.h"
 #include "base/cpu.h"
 #include "base/debug/alias.h"
@@ -62,10 +62,14 @@
 #define PA_SCAN_INLINE ALWAYS_INLINE
 #endif
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
-[[noreturn]] BASE_EXPORT NOINLINE NOT_TAIL_CALLED void DoubleFreeAttempt() {
+namespace base {
+using ::base::MakeRefCounted;
+using ::base::RefCountedThreadSafe;
+}  // namespace base
+
+[[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeAttempt() {
   NO_CODE_FOLDING();
   IMMEDIATE_CRASH();
 }
@@ -345,7 +349,8 @@ class SuperPageSnapshot final {
   static constexpr size_t kStateBitmapMinReservedSize =
       __builtin_constant_p(ReservedStateBitmapSize())
           ? ReservedStateBitmapSize()
-          : base::bits::AlignUp(sizeof(AllocationStateMap),
-                                kMinPartitionPageSize);
+          : partition_alloc::internal::base::bits::AlignUp(
+                sizeof(AllocationStateMap),
+                kMinPartitionPageSize);
   // Take into account guard partition page at the end of super-page.
   static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;
@@ -984,9 +989,9 @@ void UnmarkInCardTable(uintptr_t slot_start,
   const size_t slot_size = slot_span->bucket->slot_size;
   if (slot_size >= SystemPageSize()) {
     const uintptr_t discard_end =
-        bits::AlignDown(slot_start + slot_size, SystemPageSize());
+        base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
     const uintptr_t discard_begin =
-        bits::AlignUp(slot_start, SystemPageSize());
+        base::bits::AlignUp(slot_start, SystemPageSize());
     const intptr_t discard_size = discard_end - discard_begin;
     if (discard_size > 0) {
       DiscardSystemPages(discard_begin, discard_size);
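The AlignUp/AlignDown pair above selects exactly the whole system pages inside a freed slot, which are then returned to the OS. A self-contained check of that window arithmetic, assuming 4 KiB system pages (the helpers below are local restatements, not the PartitionAlloc functions):

#include <cstdint>

constexpr uintptr_t AlignDownTo(uintptr_t v, uintptr_t a) {
  return v & ~(a - 1);  // a must be a power of two
}
constexpr uintptr_t AlignUpTo(uintptr_t v, uintptr_t a) {
  return AlignDownTo(v + a - 1, a);
}

// A slot at 0x1100 of size 0x3000 spans [0x1100, 0x4100); only the two whole
// pages at 0x2000 and 0x3000 fall entirely inside it and can be discarded.
static_assert(AlignUpTo(0x1100, 0x1000) == 0x2000, "discard_begin");
static_assert(AlignDownTo(0x1100 + 0x3000, 0x1000) == 0x4000, "discard_end");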
@@ -1187,12 +1192,12 @@ class PCScan::PCScanThread final {
       std::lock_guard<std::mutex> lock(mutex_);
       PA_DCHECK(!posted_task_.get());
       posted_task_ = std::move(task);
-      wanted_delay_ = TimeDelta();
+      wanted_delay_ = base::TimeDelta();
     }
     condvar_.notify_one();
   }
 
-  void PostDelayedTask(TimeDelta delay) {
+  void PostDelayedTask(base::TimeDelta delay) {
     {
       std::lock_guard<std::mutex> lock(mutex_);
       if (posted_task_.get()) {
@@ -1256,7 +1261,7 @@ class PCScan::PCScanThread final {
     // Differentiate between a posted task and a delayed task schedule.
     if (posted_task_.get()) {
      std::swap(current_task, posted_task_);
-      wanted_delay_ = TimeDelta();
+      wanted_delay_ = base::TimeDelta();
     } else {
       PA_DCHECK(wanted_delay_.is_zero());
     }
@@ -1275,7 +1280,7 @@ class PCScan::PCScanThread final {
   std::mutex mutex_;
   std::condition_variable condvar_;
   TaskHandle posted_task_;
-  TimeDelta wanted_delay_;
+  base::TimeDelta wanted_delay_;
 };
 
 PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}
@@ -1306,7 +1311,7 @@ void PCScanInternal::Initialize(PCScan::InitConfig config) {
   scannable_roots_ = RootsMap();
   nonscannable_roots_ = RootsMap();
 
-  static StatsReporter s_no_op_reporter;
+  static partition_alloc::StatsReporter s_no_op_reporter;
   PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);
 
   // Don't initialize PCScanThread::Instance() as otherwise sandbox complains
@@ -1373,7 +1378,7 @@ void PCScanInternal::PerformScanIfNeeded(
   PerformScan(invocation_mode);
 }
 
-void PCScanInternal::PerformDelayedScan(TimeDelta delay) {
+void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
   PCScan::PCScanThread::Instance().PostDelayedTask(delay);
 }
 
@@ -1561,14 +1566,16 @@ void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
   // slot-spans doesn't need to be protected (the allocator will enter the
   // safepoint before trying to allocate from it).
   PA_SCAN_DCHECK(write_protector_.get());
-  write_protector_->ProtectPages(begin,
-                                 base::bits::AlignUp(size, SystemPageSize()));
+  write_protector_->ProtectPages(
+      begin,
+      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
 }
 
 void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
   PA_SCAN_DCHECK(write_protector_.get());
-  write_protector_->UnprotectPages(begin,
-                                   base::bits::AlignUp(size, SystemPageSize()));
+  write_protector_->UnprotectPages(
+      begin,
+      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
 }
 
 void PCScanInternal::ClearRootsForTesting() {
@@ -1606,15 +1613,15 @@ void PCScanInternal::FinishScanForTesting() {
   current_task->RunFromScanner();
 }
 
-void PCScanInternal::RegisterStatsReporter(StatsReporter* reporter) {
+void PCScanInternal::RegisterStatsReporter(
+    partition_alloc::StatsReporter* reporter) {
   PA_DCHECK(reporter);
   stats_reporter_ = reporter;
 }
 
-StatsReporter& PCScanInternal::GetReporter() {
+partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
   PA_DCHECK(stats_reporter_);
   return *stats_reporter_;
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
src/base/allocator/partition_allocator/starscan/pcscan_internal.h
@@ -20,11 +20,10 @@
 #include "base/memory/scoped_refptr.h"
 #include "base/no_destructor.h"
 
-namespace base {
-
-class StatsReporter;
-
-namespace internal {
+// TODO(crbug.com/1288247): Remove this when migration is complete.
+namespace partition_alloc::internal {
 
 class StarScanSnapshot;
 
 class PCScanTask;
@@ -61,7 +60,7 @@ class PCScanInternal final {
 
   void PerformScan(PCScan::InvocationMode);
   void PerformScanIfNeeded(PCScan::InvocationMode);
-  void PerformDelayedScan(TimeDelta delay);
+  void PerformDelayedScan(base::TimeDelta delay);
   void JoinScan();
 
   TaskHandle CurrentPCScanTask() const;
@@ -107,19 +106,19 @@ class PCScanInternal final {
   void ReinitForTesting(PCScan::InitConfig);  // IN-TEST
   void FinishScanForTesting();                // IN-TEST
 
-  void RegisterStatsReporter(StatsReporter* reporter);
-  StatsReporter& GetReporter();
+  void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
+  partition_alloc::StatsReporter& GetReporter();
 
  private:
   friend base::NoDestructor<PCScanInternal>;
-  friend class StarScanSnapshot;
+  friend class partition_alloc::internal::StarScanSnapshot;
 
   using StackTops = std::unordered_map<
-      PlatformThreadId,
+      ::base::PlatformThreadId,
       void*,
-      std::hash<PlatformThreadId>,
+      std::hash<::base::PlatformThreadId>,
       std::equal_to<>,
-      MetadataAllocator<std::pair<const PlatformThreadId, void*>>>;
+      MetadataAllocator<std::pair<const ::base::PlatformThreadId, void*>>>;
 
   PCScanInternal();
 
@@ -142,13 +141,18 @@ class PCScanInternal final {
   const SimdSupport simd_support_;
 
   std::unique_ptr<WriteProtector> write_protector_;
-  StatsReporter* stats_reporter_ = nullptr;
+  partition_alloc::StatsReporter* stats_reporter_ = nullptr;
 
   bool is_initialized_ = false;
 };
 
-}  // namespace internal
-
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove this when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::PCScanInternal;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_INTERNAL_H_
src/base/allocator/partition_allocator/starscan/pcscan_scheduling.cc
@@ -15,8 +15,7 @@
 #include "base/bind.h"
 #include "base/time/time.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // static
 constexpr size_t QuarantineData::kQuarantineSizeMinLimit;
@@ -34,7 +33,8 @@ void PCScanSchedulingBackend::EnableScheduling() {
   scheduling_enabled_.store(true, std::memory_order_relaxed);
   // Check if *Scan needs to be run immediately.
   if (NeedsToImmediatelyScan())
-    PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
+    ::base::internal::PCScan::PerformScan(
+        ::base::internal::PCScan::InvocationMode::kNonBlocking);
 }
 
 size_t PCScanSchedulingBackend::ScanStarted() {
@@ -43,8 +43,8 @@ size_t PCScanSchedulingBackend::ScanStarted() {
   return data.current_size.exchange(0, std::memory_order_relaxed);
 }
 
-TimeDelta PCScanSchedulingBackend::UpdateDelayedSchedule() {
-  return TimeDelta();
+base::TimeDelta PCScanSchedulingBackend::UpdateDelayedSchedule() {
+  return base::TimeDelta();
 }
 
 // static
@@ -92,7 +92,7 @@ bool MUAwareTaskBasedBackend::LimitReached() {
   bool should_reschedule = false;
   base::TimeDelta reschedule_delay;
   {
-    PartitionAutoLock guard(scheduler_lock_);
+    ScopedGuard guard(scheduler_lock_);
     // At this point we reached a limit where the schedule generally wants to
     // trigger a scan.
     if (hard_limit_) {
@@ -120,7 +120,7 @@ bool MUAwareTaskBasedBackend::LimitReached() {
     // 4. Otherwise, the soft limit would trigger a scan immediately if the
     //    mutator utilization requirement is satisfied.
     reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
-    if (reschedule_delay <= TimeDelta()) {
+    if (reschedule_delay <= base::TimeDelta()) {
       // May invoke scan immediately.
       return true;
     }
@@ -142,7 +142,7 @@ bool MUAwareTaskBasedBackend::LimitReached() {
 }
 
 size_t MUAwareTaskBasedBackend::ScanStarted() {
-  PartitionAutoLock guard(scheduler_lock_);
+  ScopedGuard guard(scheduler_lock_);
 
   return PCScanSchedulingBackend::ScanStarted();
 }
@@ -153,7 +153,7 @@ void MUAwareTaskBasedBackend::UpdateScheduleAfterScan(
     size_t heap_size) {
   scheduler_.AccountFreed(survived_bytes);
 
-  PartitionAutoLock guard(scheduler_lock_);
+  ScopedGuard guard(scheduler_lock_);
 
   // |heap_size| includes the current quarantine size, we intentionally leave
   // some slack till hitting the limit.
@@ -180,7 +180,7 @@ bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
   bool should_reschedule = false;
   base::TimeDelta reschedule_delay;
   {
-    PartitionAutoLock guard(scheduler_lock_);
+    ScopedGuard guard(scheduler_lock_);
     // If |hard_limit_| was set to zero, the soft limit was reached. Bail out if
     // it's not.
     if (hard_limit_)
@@ -188,7 +188,7 @@ bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
 
   // Check if mutator utilization requiremet is satisfied.
   reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
-  if (reschedule_delay <= TimeDelta()) {
+  if (reschedule_delay <= base::TimeDelta()) {
     // May invoke scan immediately.
     return true;
   }
@@ -205,13 +205,12 @@ bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
   return false;
 }
 
-TimeDelta MUAwareTaskBasedBackend::UpdateDelayedSchedule() {
-  PartitionAutoLock guard(scheduler_lock_);
+base::TimeDelta MUAwareTaskBasedBackend::UpdateDelayedSchedule() {
+  ScopedGuard guard(scheduler_lock_);
   // TODO(1197479): Adjust schedule to current heap sizing.
   const auto delay = earliest_next_scan_time_ - base::TimeTicks::Now();
   PA_PCSCAN_VLOG(3) << "Schedule is off by " << delay.InMillisecondsF() << "ms";
-  return delay >= TimeDelta() ? delay : TimeDelta();
+  return delay >= base::TimeDelta() ? delay : base::TimeDelta();
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
|
@ -10,12 +10,10 @@
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_lock.h"
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
namespace partition_alloc::internal {
|
||||
|
||||
class PCScanScheduler;
|
||||
|
||||
@ -66,7 +64,7 @@ class BASE_EXPORT PCScanSchedulingBackend {
|
||||
|
||||
// Invoked by PCScan to ask for a new timeout for a scheduled PCScan task.
|
||||
// Only invoked if scheduler requests a delayed scan at some point.
|
||||
virtual TimeDelta UpdateDelayedSchedule();
|
||||
virtual base::TimeDelta UpdateDelayedSchedule();
|
||||
|
||||
protected:
|
||||
inline bool SchedulingDisabled() const;
|
||||
@ -109,7 +107,7 @@ class BASE_EXPORT MUAwareTaskBasedBackend final
|
||||
bool LimitReached() final;
|
||||
size_t ScanStarted() final;
|
||||
void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
|
||||
TimeDelta UpdateDelayedSchedule() final;
|
||||
base::TimeDelta UpdateDelayedSchedule() final;
|
||||
|
||||
private:
|
||||
// Limit triggering the scheduler. If `kTargetMutatorUtilizationPercent` is
|
||||
@ -129,7 +127,7 @@ class BASE_EXPORT MUAwareTaskBasedBackend final
|
||||
// Callback to schedule a delayed scan.
|
||||
const ScheduleDelayedScanFunc schedule_delayed_scan_;
|
||||
|
||||
PartitionLock scheduler_lock_;
|
||||
Lock scheduler_lock_;
|
||||
size_t hard_limit_ GUARDED_BY(scheduler_lock_){0};
|
||||
base::TimeTicks earliest_next_scan_time_ GUARDED_BY(scheduler_lock_);
|
||||
|
||||
@ -196,7 +194,16 @@ bool PCScanScheduler::AccountFreed(size_t size) {
|
||||
backend_->LimitReached();
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
} // namespace partition_alloc::internal
|
||||
|
||||
// TODO(crbug.com/1288247): Remove these when migration is complete.
|
||||
namespace base::internal {
|
||||
|
||||
using ::partition_alloc::internal::LimitBackend;
|
||||
using ::partition_alloc::internal::MUAwareTaskBasedBackend;
|
||||
using ::partition_alloc::internal::PCScanScheduler;
|
||||
using ::partition_alloc::internal::QuarantineData;
|
||||
|
||||
} // namespace base::internal
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_PCSCAN_SCHEDULING_H_
|
||||
|
src/base/allocator/partition_allocator/starscan/raceful_worklist.h
@@ -14,8 +14,7 @@
 #include "base/compiler_specific.h"
 #include "base/rand_util.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 template <typename T>
 class RacefulWorklist {
@@ -138,6 +137,11 @@ void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
   worklist_.fully_visited_.store(true, std::memory_order_release);
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove these when migration is complete.
+namespace base::internal {
+using ::partition_alloc::internal::RacefulWorklist;
+}
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_
src/base/allocator/partition_allocator/starscan/scan_loop.h
@@ -34,8 +34,7 @@
 #include <arm_neon.h>
 #endif
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // Iterates over range of memory using the best available SIMD extension.
 // Assumes that 64bit platforms have cage support and the begin pointer of
@@ -223,7 +222,13 @@ void ScanLoop<Derived>::RunNEON(uintptr_t begin, uintptr_t end) {
 }
 #endif  // defined(PA_STARSCAN_NEON_SUPPORTED)
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove this when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::ScanLoop;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SCAN_LOOP_H_
src/base/allocator/partition_allocator/starscan/snapshot.cc
@@ -10,8 +10,7 @@
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 std::unique_ptr<StarScanSnapshot> StarScanSnapshot::Create(
     const PCScanInternal& pcscan) {
@@ -44,5 +43,4 @@ StarScanSnapshot::StarScanSnapshot(const PCScanInternal& pcscan) {
 
 StarScanSnapshot::~StarScanSnapshot() = default;
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
src/base/allocator/partition_allocator/starscan/snapshot.h
@@ -11,10 +11,7 @@
 #include "base/allocator/partition_allocator/starscan/pcscan_internal.h"
 #include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
 
-namespace base {
-namespace internal {
-
-class PCScanInternal;
+namespace partition_alloc::internal {
 
 class StarScanSnapshot final : public AllocatedOnPCScanMetadataPartition {
  public:
@@ -92,7 +89,13 @@ StarScanSnapshot::SweepingView::SweepingView(StarScanSnapshot& snapshot)
 StarScanSnapshot::UnprotectingView::UnprotectingView(StarScanSnapshot& snapshot)
     : StarScanSnapshot::ViewBase(snapshot.unprotect_worklist_) {}
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove this when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::StarScanSnapshot;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_SNAPSHOT_H_
src/base/allocator/partition_allocator/starscan/stack/stack.cc
@@ -21,8 +21,7 @@
 extern "C" void* __libc_stack_end;
 #endif
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 #if BUILDFLAG(IS_WIN)
 
@@ -144,5 +143,4 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
 #endif
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
src/base/allocator/partition_allocator/starscan/stack/stack.h
@@ -10,8 +10,7 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // Returns the current stack pointer.
 // TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
@@ -44,7 +43,16 @@ class BASE_EXPORT Stack final {
   void* stack_top_;
 };
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove these when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::GetStackPointer;
+using ::partition_alloc::internal::GetStackTop;
+using ::partition_alloc::internal::Stack;
+using ::partition_alloc::internal::StackVisitor;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STACK_STACK_H_
src/base/allocator/partition_allocator/starscan/starscan_fwd.h
@@ -7,8 +7,7 @@
 
 #include <cstdint>
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // Defines what thread executes a StarScan task.
 enum class Context {
@@ -26,7 +25,14 @@ enum class SimdSupport : uint8_t {
   kNEON,
 };
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
+
+// TODO(crbug.com/1288247): Remove these when migration is complete.
+namespace base::internal {
+
+using ::partition_alloc::internal::Context;
+using ::partition_alloc::internal::SimdSupport;
+
+}  // namespace base::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STARSCAN_FWD_H_
src/base/allocator/partition_allocator/starscan/state_bitmap.h
@@ -15,12 +15,11 @@
 #include <tuple>
 #include <utility>
 
+#include "base/allocator/partition_allocator/base/bits.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/bits.h"
 #include "base/compiler_specific.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 // Bitmap which tracks allocation states. An allocation can be in one of 3
 // states:
@@ -70,7 +69,7 @@ class StateBitmap final {
   using CellType = uintptr_t;
   static constexpr size_t kBitsPerCell = sizeof(CellType) * CHAR_BIT;
   static constexpr size_t kBitsNeededForAllocation =
-      bits::Log2Floor(static_cast<size_t>(State::kNumOfStates));
+      base::bits::Log2Floor(static_cast<size_t>(State::kNumOfStates));
   static constexpr CellType kStateMask = (1 << kBitsNeededForAllocation) - 1;
 
   static constexpr size_t kBitmapSize =
@@ -483,7 +482,6 @@ void StateBitmap<PageSize, PageAlignment, AllocationAlignment>::Clear() {
   std::fill(bitmap_.begin(), bitmap_.end(), '\0');
 }
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
 
 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATE_BITMAP_H_
src/base/allocator/partition_allocator/starscan/stats_collector.cc
@@ -8,8 +8,7 @@
 #include "base/allocator/partition_allocator/starscan/stats_reporter.h"
 #include "base/time/time.h"
 
-namespace base {
-namespace internal {
+namespace partition_alloc::internal {
 
 StatsCollector::StatsCollector(const char* process_name,
                                size_t quarantine_last_size)
@@ -25,7 +24,8 @@ base::TimeDelta StatsCollector::GetOverallTime() const {
                      ScannerId::kOverall);
 }
 
-void StatsCollector::ReportTracesAndHists(StatsReporter& reporter) const {
+void StatsCollector::ReportTracesAndHists(
+    partition_alloc::StatsReporter& reporter) const {
   ReportTracesAndHistsImpl<Context::kMutator>(reporter, mutator_trace_events_);
   ReportTracesAndHistsImpl<Context::kScanner>(reporter, scanner_trace_events_);
   ReportSurvivalRate(reporter);
@@ -46,13 +46,13 @@ base::TimeDelta StatsCollector::GetTimeImpl(
 
 template <Context context>
 void StatsCollector::ReportTracesAndHistsImpl(
-    StatsReporter& reporter,
+    partition_alloc::StatsReporter& reporter,
     const DeferredTraceEventMap<context>& event_map) const {
   std::array<base::TimeDelta, static_cast<size_t>(IdType<context>::kNumIds)>
       accumulated_events{};
   // First, report traces and accumulate each trace scope to report UMA hists.
   for (const auto& tid_and_events : event_map.get_underlying_map_unsafe()) {
-    const PlatformThreadId tid = tid_and_events.first;
+    const ::base::PlatformThreadId tid = tid_and_events.first;
     const auto& events = tid_and_events.second;
     PA_DCHECK(accumulated_events.size() == events.size());
     for (size_t id = 0; id < events.size(); ++id) {
@@ -79,7 +79,8 @@ void StatsCollector::ReportTracesAndHistsImpl(
   }
 }
 
-void StatsCollector::ReportSurvivalRate(StatsReporter& reporter) const {
+void StatsCollector::ReportSurvivalRate(
+    partition_alloc::StatsReporter& reporter) const {
   const double survived_rate =
       static_cast<double>(survived_quarantine_size()) / quarantine_last_size_;
   reporter.ReportSurvivedQuarantineSize(survived_quarantine_size());
@@ -101,11 +102,10 @@ template base::TimeDelta StatsCollector::GetTimeImpl(
     IdType<Context::kScanner>) const;
 
 template void StatsCollector::ReportTracesAndHistsImpl(
-    StatsReporter& reporter,
+    partition_alloc::StatsReporter& reporter,
     const DeferredTraceEventMap<Context::kMutator>&) const;
 template void StatsCollector::ReportTracesAndHistsImpl(
-    StatsReporter& reporter,
+    partition_alloc::StatsReporter& reporter,
     const DeferredTraceEventMap<Context::kScanner>&) const;
 
-}  // namespace internal
-}  // namespace base
+}  // namespace partition_alloc::internal
|
||||
#include "base/threading/platform_thread.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace partition_alloc {
|
||||
|
||||
class StatsReporter;
|
||||
|
||||
@ -74,11 +74,12 @@ class StatsCollector final {
|
||||
using PerThreadEvents =
|
||||
std::array<DeferredTraceEvent, static_cast<size_t>(IdType::kNumIds)>;
|
||||
using UnderlyingMap = std::unordered_map<
|
||||
PlatformThreadId,
|
||||
::base::PlatformThreadId,
|
||||
PerThreadEvents,
|
||||
std::hash<PlatformThreadId>,
|
||||
std::hash<::base::PlatformThreadId>,
|
||||
std::equal_to<>,
|
||||
MetadataAllocator<std::pair<const PlatformThreadId, PerThreadEvents>>>;
|
||||
MetadataAllocator<
|
||||
std::pair<const ::base::PlatformThreadId, PerThreadEvents>>>;
|
||||
|
||||
inline void RegisterBeginEventFromCurrentThread(IdType id);
|
||||
inline void RegisterEndEventFromCurrentThread(IdType id);
|
||||
@ -133,7 +134,7 @@ class StatsCollector final {
|
||||
}
|
||||
|
||||
base::TimeDelta GetOverallTime() const;
|
||||
void ReportTracesAndHists(StatsReporter& reporter) const;
|
||||
void ReportTracesAndHists(partition_alloc::StatsReporter& reporter) const;
|
||||
|
||||
private:
|
||||
using MetadataString =
|
||||
@ -161,10 +162,10 @@ class StatsCollector final {
|
||||
|
||||
template <Context context>
|
||||
void ReportTracesAndHistsImpl(
|
||||
StatsReporter& reporter,
|
||||
partition_alloc::StatsReporter& reporter,
|
||||
const DeferredTraceEventMap<context>& event_map) const;
|
||||
|
||||
void ReportSurvivalRate(StatsReporter& reporter) const;
|
||||
void ReportSurvivalRate(partition_alloc::StatsReporter& reporter) const;
|
||||
|
||||
DeferredTraceEventMap<Context::kMutator> mutator_trace_events_;
|
||||
DeferredTraceEventMap<Context::kScanner> scanner_trace_events_;
|
||||
@ -242,6 +243,13 @@ inline StatsCollector::MetadataString StatsCollector::ToUMAString(
|
||||
#undef FOR_ALL_PCSCAN_SCANNER_SCOPES
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
} // namespace partition_alloc
|
||||
|
||||
// TODO(crbug.com/1151236): Remove this when migration is complete.
|
||||
namespace base::internal {
|
||||
|
||||
using ::partition_alloc::internal::StatsCollector;
|
||||
|
||||
} // namespace base::internal
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_COLLECTOR_H_
|
||||
|
@ -9,7 +9,7 @@
#include "base/threading/platform_thread.h"
#include "base/time/time.h"

namespace base {
namespace partition_alloc {

// StatsReporter is a wrapper to invoke TRACE_EVENT_BEGIN/END, TRACE_COUNTER1,
// and UmaHistogramTimes. It is used to just remove trace_log and uma
@ -17,21 +17,21 @@ namespace base {
class StatsReporter {
public:
virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
const PlatformThreadId tid,
TimeTicks start_time,
TimeTicks end_time) {}
const base::PlatformThreadId tid,
base::TimeTicks start_time,
base::TimeTicks end_time) {}
virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
const PlatformThreadId tid,
TimeTicks start_time,
TimeTicks end_time) {}
const base::PlatformThreadId tid,
base::TimeTicks start_time,
base::TimeTicks end_time) {}

virtual void ReportSurvivedQuarantineSize(size_t survived_size) {}

virtual void ReportSurvivedQuarantinePercent(double survivied_rate) {}

virtual void ReportStats(const char* stats_name, TimeDelta sample) {}
virtual void ReportStats(const char* stats_name, base::TimeDelta sample) {}
};

} // namespace base
} // namespace partition_alloc

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_STATS_REPORTER_H_

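The StatsReporter interface above consists of virtual methods with empty default bodies, so an embedder overrides only the hooks it cares about. A self-contained sketch of that pattern; the class and method names below are illustrative stand-ins, not the real Chromium types:

#include <cstddef>
#include <cstdio>

// Hooks default to no-ops, mirroring the empty {} bodies above.
class Reporter {
 public:
  virtual ~Reporter() = default;
  virtual void ReportSurvivedQuarantineSize(std::size_t survived_size) {}
  virtual void ReportSurvivedQuarantinePercent(double survived_rate) {}
};

class LoggingReporter : public Reporter {
 public:
  void ReportSurvivedQuarantineSize(std::size_t survived_size) override {
    std::printf("survived: %zu bytes\n", survived_size);
  }
};

int main() {
  LoggingReporter logging;
  Reporter& reporter = logging;
  reporter.ReportSurvivedQuarantineSize(4096);     // dispatches to the override
  reporter.ReportSurvivedQuarantinePercent(0.25);  // falls back to the no-op
}
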
@ -25,11 +25,11 @@
#include <sys/types.h>
#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)

namespace base {
namespace internal {
namespace partition_alloc::internal {

PCScan::ClearType NoWriteProtector::SupportedClearType() const {
return PCScan::ClearType::kLazy;
::base::internal::PCScan::ClearType NoWriteProtector::SupportedClearType()
const {
return ::base::internal::PCScan::ClearType::kLazy;
}

#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
@ -38,7 +38,7 @@ void UserFaultFDThread(int uffd) {
PA_DCHECK(-1 != uffd);

static constexpr char kThreadName[] = "PCScanPFHandler";
base::PlatformThread::SetName(kThreadName);
::base::PlatformThread::SetName(kThreadName);

while (true) {
// Pool on the uffd descriptor for page fault events.
@ -58,7 +58,7 @@ void UserFaultFDThread(int uffd) {

// Enter the safepoint. Concurrent faulted writes will wait until safepoint
// finishes.
PCScan::JoinScanIfNeeded();
::base::internal::PCScan::JoinScanIfNeeded();
}
}
} // namespace
@ -121,8 +121,10 @@ void UserFaultFDWriteProtector::UnprotectPages(uintptr_t begin, size_t length) {
UserFaultFDWPSet(uffd_, begin, length, UserFaultFDWPMode::kUnprotect);
}

PCScan::ClearType UserFaultFDWriteProtector::SupportedClearType() const {
return IsSupported() ? PCScan::ClearType::kEager : PCScan::ClearType::kLazy;
::base::internal::PCScan::ClearType
UserFaultFDWriteProtector::SupportedClearType() const {
return IsSupported() ? ::base::internal::PCScan::ClearType::kEager
: ::base::internal::PCScan::ClearType::kLazy;
}

bool UserFaultFDWriteProtector::IsSupported() const {
@ -131,5 +133,4 @@ bool UserFaultFDWriteProtector::IsSupported() const {

#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)

} // namespace internal
} // namespace base
} // namespace partition_alloc::internal

@ -14,8 +14,7 @@
#include "base/allocator/partition_allocator/starscan/raceful_worklist.h"
#include "build/build_config.h"

namespace base {
namespace internal {
namespace partition_alloc::internal {

// Interface for page protection/unprotection. This is used in DCScan to catch
// concurrent mutator writes. Protection is done when the scanner starts
@ -29,14 +28,14 @@ class WriteProtector : public AllocatedOnPCScanMetadataPartition {

virtual bool IsEnabled() const = 0;

virtual PCScan::ClearType SupportedClearType() const = 0;
virtual ::base::internal::PCScan::ClearType SupportedClearType() const = 0;
};

class NoWriteProtector final : public WriteProtector {
public:
void ProtectPages(uintptr_t, size_t) final {}
void UnprotectPages(uintptr_t, size_t) final {}
PCScan::ClearType SupportedClearType() const final;
::base::internal::PCScan::ClearType SupportedClearType() const final;
inline bool IsEnabled() const override;
};

@ -56,7 +55,7 @@ class UserFaultFDWriteProtector final : public WriteProtector {
void ProtectPages(uintptr_t, size_t) final;
void UnprotectPages(uintptr_t, size_t) final;

PCScan::ClearType SupportedClearType() const final;
::base::internal::PCScan::ClearType SupportedClearType() const final;

inline bool IsEnabled() const override;

@ -72,7 +71,17 @@ bool UserFaultFDWriteProtector::IsEnabled() const {

#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)

} // namespace internal
} // namespace base
} // namespace partition_alloc::internal

// TODO(crbug.com/1288247): Remove these when migration is complete.
namespace base::internal {
using ::partition_alloc::internal::NoWriteProtector;
using ::partition_alloc::internal::WriteProtector;

#if defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
using ::partition_alloc::internal::UserFaultFDWriteProtector;
#endif // defined(PA_STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)

} // namespace base::internal

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_WRITE_PROTECTOR_H_

@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/thread_cache.h"

#include <sys/types.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
@ -20,24 +21,24 @@
#include "base/dcheck_is_on.h"
#include "build/build_config.h"

namespace base::internal {
namespace partition_alloc {

namespace {
ThreadCacheRegistry g_instance;
} // namespace
} // namespace base::internal

namespace partition_alloc::internal::tools {
namespace tools {
uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize] = {
kNeedle1, reinterpret_cast<uintptr_t>(&::base::internal::g_instance),
kNeedle1, reinterpret_cast<uintptr_t>(&g_instance),
#if BUILDFLAG(RECORD_ALLOC_INFO)
reinterpret_cast<uintptr_t>(&partition_alloc::internal::g_allocs),
reinterpret_cast<uintptr_t>(&internal::g_allocs),
#else
0,
#endif
kNeedle2};
} // namespace partition_alloc::internal::tools
} // namespace tools

namespace base::internal {
namespace internal {

BASE_EXPORT PartitionTlsKey g_thread_cache_key;
#if defined(PA_THREAD_CACHE_FAST_TLS)
@ -45,6 +46,8 @@ BASE_EXPORT
thread_local ThreadCache* g_thread_cache;
#endif

} // namespace internal

namespace {
// Since |g_thread_cache_key| is shared, make sure that no more than one
// PartitionRoot can use it.
@ -74,7 +77,7 @@ uint8_t ThreadCache::global_limits_[ThreadCache::kBucketCount];

// Start with the normal size, not the maximum one.
uint16_t ThreadCache::largest_active_bucket_index_ =
BucketIndexLookup::GetIndex(ThreadCache::kDefaultSizeThreshold);
internal::BucketIndexLookup::GetIndex(ThreadCache::kDefaultSizeThreshold);

// static
ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
@ -82,7 +85,7 @@ ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
}

void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
cache->next_ = nullptr;
cache->prev_ = nullptr;

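The recurring change in this file replaces PartitionAutoLock with internal::ScopedGuard; both are RAII guards that hold the registry lock for the rest of the enclosing scope. A self-contained sketch of the idiom over std::mutex (illustrative names, not PartitionAlloc's actual Lock type):

#include <cstdio>
#include <mutex>

// Constructor takes the lock, destructor releases it, so every exit path
// from the scope unlocks correctly.
class ScopedGuard {
 public:
  explicit ScopedGuard(std::mutex& lock) : lock_(lock) { lock_.lock(); }
  ~ScopedGuard() { lock_.unlock(); }
  ScopedGuard(const ScopedGuard&) = delete;
  ScopedGuard& operator=(const ScopedGuard&) = delete;

 private:
  std::mutex& lock_;
};

std::mutex g_registry_lock;
int g_registered_caches = 0;

void RegisterThreadCache() {
  ScopedGuard scoped_locker(g_registry_lock);  // held until the scope ends
  ++g_registered_caches;
}  // lock released here

int main() {
  RegisterThreadCache();
  std::printf("registered: %d\n", g_registered_caches);  // prints 1
}
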
@ -94,7 +97,7 @@ void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
}

void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
if (cache->prev_)
cache->prev_->next_ = cache->next_;
if (cache->next_)
@ -108,7 +111,7 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only,
ThreadCache::EnsureThreadSpecificDataInitialized();
memset(reinterpret_cast<void*>(stats), 0, sizeof(ThreadCacheStats));

PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
if (my_thread_only) {
auto* tcache = ThreadCache::Get();
if (!ThreadCache::IsValid(tcache))
@ -146,7 +149,7 @@ void ThreadCacheRegistry::PurgeAll() {
current_thread_tcache->Purge();

{
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
ThreadCache* tcache = list_head_;
while (tcache) {
PA_DCHECK(ThreadCache::IsValid(tcache));
@ -162,7 +165,7 @@ void ThreadCacheRegistry::PurgeAll() {
}

void ThreadCacheRegistry::ForcePurgeAllThreadAfterForkUnsafe() {
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
ThreadCache* tcache = list_head_;
while (tcache) {
#if DCHECK_IS_ON()
@ -186,7 +189,13 @@ void ThreadCacheRegistry::ForcePurgeAllThreadAfterForkUnsafe() {
// passes. See crbug.com/1216964.
tcache->cached_memory_ = tcache->CachedMemory();

tcache->TryPurge();
// At this point, we should call |TryPurge|. However, due to the thread
// cache being possibly inconsistent at this point, this may crash. Rather
// than crash, we'd prefer to simply not purge, even though this may leak
// memory in some cases.
//
// see crbug.com/1289092 for details of the crashes.

tcache = tcache->next_;
}
}
@ -201,7 +210,7 @@ void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
// - Set the global limits, which will affect newly created threads.
// - Enumerate all thread caches and set the limit to the global one.
{
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
ThreadCache* tcache = list_head_;

// If this is called before *any* thread cache has serviced *any*
@ -241,7 +250,7 @@ void ThreadCacheRegistry::RunPeriodicPurge() {
// which is fine.
size_t cached_memory_approx = 0;
{
PartitionAutoLock scoped_locker(GetLock());
internal::ScopedGuard scoped_locker(GetLock());
ThreadCache* tcache = list_head_;
// Can run when there is no thread cache, in which case there is nothing to
// do, and the task should not be rescheduled. This would typically indicate
@ -304,11 +313,12 @@ void ThreadCacheRegistry::ResetForTesting() {
void ThreadCache::EnsureThreadSpecificDataInitialized() {
// Using the registry lock to protect from concurrent initialization without
// adding a special-pupose lock.
PartitionAutoLock scoped_locker(ThreadCacheRegistry::Instance().GetLock());
internal::ScopedGuard scoped_locker(
ThreadCacheRegistry::Instance().GetLock());
if (g_thread_cache_key_created)
return;

bool ok = PartitionTlsCreate(&g_thread_cache_key, Delete);
bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
PA_CHECK(ok);
g_thread_cache_key_created = true;
}
@ -330,7 +340,7 @@ void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
} else {
#if BUILDFLAG(IS_WIN)
// OnDllProcessDetach accesses g_thread_cache_root which is nullptr now.
PartitionTlsSetOnDllProcessDetach(nullptr);
internal::PartitionTlsSetOnDllProcessDetach(nullptr);
#endif
}
}
@ -338,7 +348,7 @@ void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
// static
void ThreadCache::RemoveTombstoneForTesting() {
PA_CHECK(IsTombstone(Get()));
PartitionTlsSet(g_thread_cache_key, nullptr);
internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
}

// static
@ -363,7 +373,7 @@ void ThreadCache::Init(PartitionRoot<>* root) {
}

#if BUILDFLAG(IS_WIN)
PartitionTlsSetOnDllProcessDetach(OnDllProcessDetach);
internal::PartitionTlsSetOnDllProcessDetach(OnDllProcessDetach);
#endif

SetGlobalLimits(root, kDefaultMultiplier);
@ -424,8 +434,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
PA_CHECK(root);
// See comment in thread_cache.h, this is used to make sure
// kThreadCacheNeedleArray is kept in the final binary.
PA_CHECK(partition_alloc::internal::tools::kThreadCacheNeedleArray[0] ==
partition_alloc::internal::tools::kNeedle1);
PA_CHECK(tools::kThreadCacheNeedleArray[0] == tools::kNeedle1);

// Placement new and RawAlloc() are used, as otherwise when this partition is
// the malloc() implementation, the memory allocated for the new thread cache
@ -440,13 +449,13 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
auto* bucket =
root->buckets + PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
raw_size, root->with_denser_bucket_distribution);
uintptr_t buffer =
root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
PartitionPageSize(), &usable_size, &already_zeroed);
uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
internal::PartitionPageSize(), &usable_size,
&already_zeroed);
ThreadCache* tcache = new (reinterpret_cast<void*>(buffer)) ThreadCache(root);

// This may allocate.
PartitionTlsSet(g_thread_cache_key, tcache);
internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
#if defined(PA_THREAD_CACHE_FAST_TLS)
// |thread_local| variables with destructors cause issues on some platforms.
// Since we need a destructor (to empty the thread cache), we cannot use it
@ -457,7 +466,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
//
// To still get good performance, use |thread_local| to store a raw pointer,
// and rely on the platform TLS to call the destructor.
g_thread_cache = tcache;
internal::g_thread_cache = tcache;
#endif // defined(PA_THREAD_CACHE_FAST_TLS)

return tcache;
@ -466,7 +475,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
ThreadCache::ThreadCache(PartitionRoot<>* root)
: should_purge_(false),
root_(root),
thread_id_(PlatformThread::CurrentId()),
thread_id_(base::PlatformThread::CurrentId()),
next_(nullptr),
prev_(nullptr) {
ThreadCacheRegistry::Instance().RegisterThreadCache(this);
@ -503,9 +512,9 @@ void ThreadCache::Delete(void* tcache_ptr) {
return;

#if defined(PA_THREAD_CACHE_FAST_TLS)
g_thread_cache = nullptr;
internal::g_thread_cache = nullptr;
#else
PartitionTlsSet(g_thread_cache_key, nullptr);
internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
#endif

auto* root = tcache->root_;
@ -517,9 +526,10 @@ void ThreadCache::Delete(void* tcache_ptr) {
// they don't resurrect the thread cache.
//
// TODO(lizeb): Investigate whether this is needed on POSIX as well.
PartitionTlsSet(g_thread_cache_key, reinterpret_cast<void*>(kTombstone));
internal::PartitionTlsSet(internal::g_thread_cache_key,
reinterpret_cast<void*>(kTombstone));
#if defined(PA_THREAD_CACHE_FAST_TLS)
g_thread_cache = reinterpret_cast<ThreadCache*>(kTombstone);
internal::g_thread_cache = reinterpret_cast<ThreadCache*>(kTombstone);
#endif

#endif // BUILDFLAG(IS_WIN)
@ -557,7 +567,7 @@ void ThreadCache::FillBucket(size_t bucket_index) {
// clearing which would greatly increase calls to the central allocator. (3)
// tries to keep memory usage low. So clearing half of the bucket, and filling
// a quarter of it are sensible defaults.
INCREMENT_COUNTER(stats_.batch_fill_count);
PA_INCREMENT_COUNTER(stats_.batch_fill_count);

Bucket& bucket = buckets_[bucket_index];
// Some buckets may have a limit lower than |kBatchFillRatio|, but we still
@ -577,7 +587,7 @@ void ThreadCache::FillBucket(size_t bucket_index) {

size_t allocated_slots = 0;
// Same as calling RawAlloc() |count| times, but acquires the lock only once.
::partition_alloc::internal::ScopedGuard guard(root_->lock_);
internal::ScopedGuard guard(root_->lock_);
for (int i = 0; i < count; i++) {
// Thread cache fill should not trigger expensive operations, to not grab
// the lock for a long time needlessly, but also to not inflate memory
@ -592,7 +602,7 @@ void ThreadCache::FillBucket(size_t bucket_index) {
&root_->buckets[bucket_index],
AllocFlags::kFastPathOrReturnNull | AllocFlags::kReturnNull,
root_->buckets[bucket_index].slot_size /* raw_size */,
PartitionPageSize(), &usable_size, &is_already_zeroed);
internal::PartitionPageSize(), &usable_size, &is_already_zeroed);

// Either the previous allocation would require a slow path allocation, or
// the central allocator is out of memory. If the bucket was filled with
@ -662,11 +672,12 @@ void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
}

template <bool crash_on_corruption>
void ThreadCache::FreeAfter(PartitionFreelistEntry* head, size_t slot_size) {
void ThreadCache::FreeAfter(internal::PartitionFreelistEntry* head,
size_t slot_size) {
// Acquire the lock once. Deallocation from the same bucket are likely to be
// hitting the same cache lines in the central allocator, and lock
// acquisitions can be expensive.
::partition_alloc::internal::ScopedGuard guard(root_->lock_);
internal::ScopedGuard guard(root_->lock_);
while (head) {
uintptr_t slot_start = reinterpret_cast<uintptr_t>(head);
head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
@ -719,7 +730,7 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->batch_fill_count += stats_.batch_fill_count;

#if defined(PA_THREAD_CACHE_ALLOC_STATS)
for (size_t i = 0; i < kNumBuckets + 1; i++)
for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
#endif // defined(PA_THREAD_CACHE_ALLOC_STATS)

@ -770,4 +781,4 @@ void ThreadCache::PurgeInternalHelper() {
ClearBucketHelper<crash_on_corruption>(bucket, 0);
}

} // namespace base::internal
} // namespace partition_alloc

@ -18,18 +18,21 @@
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/partition_tls.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/gtest_prod_util.h"
#include "base/no_destructor.h"
#include "base/time/time.h"
#include "build/build_config.h"

#if defined(ARCH_CPU_X86_64) && defined(PA_HAS_64_BITS_POINTERS)
#include <algorithm>
#endif

namespace partition_alloc::internal::tools {
namespace partition_alloc {

class ThreadCache;

namespace tools {

// This is used from ThreadCacheInspector, which runs in a different process. It
// scans the process memory looking for the two needles, to locate the thread
@ -56,16 +59,13 @@ constexpr uintptr_t kNeedle2 = 0x9615ee1c;
constexpr size_t kThreadCacheNeedleArraySize = 4;
extern uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize];

class HeapDumper;
class ThreadCacheInspector;

} // namespace partition_alloc::internal::tools

namespace base {
} // namespace tools

namespace internal {

class ThreadCache;

extern BASE_EXPORT PartitionTlsKey g_thread_cache_key;
// On Android, we have to go through emutls, since this is always a shared
// library, so don't bother.
@ -77,6 +77,8 @@ extern BASE_EXPORT PartitionTlsKey g_thread_cache_key;
extern BASE_EXPORT thread_local ThreadCache* g_thread_cache;
#endif

} // namespace internal

struct ThreadCacheLimits {
// When trying to conserve memory, set the thread cache limit to this.
static constexpr size_t kDefaultSizeThreshold = 512;
@ -134,23 +136,25 @@ class BASE_EXPORT ThreadCacheRegistry {
void SetThreadCacheMultiplier(float multiplier);
void SetLargestActiveBucketIndex(uint8_t largest_active_bucket_index);

static PartitionLock& GetLock() { return Instance().lock_; }
static internal::Lock& GetLock() { return Instance().lock_; }
// Purges all thread caches *now*. This is completely thread-unsafe, and
// should only be called in a post-fork() handler.
void ForcePurgeAllThreadAfterForkUnsafe();

void ResetForTesting();

static constexpr TimeDelta kMinPurgeInterval = Seconds(1);
static constexpr TimeDelta kMaxPurgeInterval = Minutes(1);
static constexpr TimeDelta kDefaultPurgeInterval = 2 * kMinPurgeInterval;
static constexpr base::TimeDelta kMinPurgeInterval = base::Seconds(1);
static constexpr base::TimeDelta kMaxPurgeInterval = base::Minutes(1);
static constexpr base::TimeDelta kDefaultPurgeInterval =
2 * kMinPurgeInterval;
static constexpr size_t kMinCachedMemoryForPurging = 500 * 1024;

private:
friend class partition_alloc::internal::tools::ThreadCacheInspector;
friend class NoDestructor<ThreadCacheRegistry>;
friend class tools::ThreadCacheInspector;
friend class tools::HeapDumper;

// Not using base::Lock as the object's constructor must be constexpr.
PartitionLock lock_;
internal::Lock lock_;
ThreadCache* list_head_ GUARDED_BY(GetLock()) = nullptr;
bool periodic_purge_is_initialized_ = false;
base::TimeDelta periodic_purge_next_interval_ = kDefaultPurgeInterval;
@ -160,24 +164,25 @@ class BASE_EXPORT ThreadCacheRegistry {
// understand enough constexpr to handle the code below.
uint8_t largest_active_bucket_index_ = 1;
#else
uint8_t largest_active_bucket_index_ =
BucketIndexLookup::GetIndex(ThreadCacheLimits::kDefaultSizeThreshold);
uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
ThreadCacheLimits::kDefaultSizeThreshold);
#endif
};

constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;

#if defined(PA_THREAD_CACHE_ENABLE_STATISTICS)
#define INCREMENT_COUNTER(counter) ++counter
#define GET_COUNTER(counter) counter
#define PA_INCREMENT_COUNTER(counter) ++counter
#else
#define INCREMENT_COUNTER(counter) \
#define PA_INCREMENT_COUNTER(counter) \
do { \
} while (0)
#define GET_COUNTER(counter) 0
#endif // defined(PA_THREAD_CACHE_ENABLE_STATISTICS)

#if DCHECK_IS_ON()

namespace internal {

class ReentrancyGuard {
public:
explicit ReentrancyGuard(bool& flag) : flag_(flag) {
@ -191,10 +196,12 @@ class ReentrancyGuard {
bool& flag_;
};

#define PA_REENTRANCY_GUARD(x) \
ReentrancyGuard guard { x }
} // namespace internal

#else
#define PA_REENTRANCY_GUARD(x) \
internal::ReentrancyGuard guard { x }

#else // DCHECK_IS_ON()

#define PA_REENTRANCY_GUARD(x) \
do { \
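The ReentrancyGuard above (moved into namespace internal by this change) flags reentrant use of the thread cache for the duration of a scope. A self-contained sketch of the idiom; assert() stands in for the check the real class performs, and the constructor/destructor bodies shown are assumed from the surrounding code:

#include <cassert>

class ReentrancyGuard {
 public:
  explicit ReentrancyGuard(bool& flag) : flag_(flag) {
    assert(!flag_);  // reentering the guarded scope is a bug
    flag_ = true;
  }
  ~ReentrancyGuard() { flag_ = false; }

 private:
  bool& flag_;
};

bool g_in_thread_cache = false;

void GuardedOperation() {
  ReentrancyGuard guard(g_in_thread_cache);
  // Calling GuardedOperation() again from here would trip the assert.
}

int main() {
  GuardedOperation();  // the flag is set and cleared around the body
}
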
@ -232,9 +239,10 @@ class BASE_EXPORT ThreadCache {

static ThreadCache* Get() {
#if defined(PA_THREAD_CACHE_FAST_TLS)
return g_thread_cache;
return internal::g_thread_cache;
#else
return reinterpret_cast<ThreadCache*>(PartitionTlsGet(g_thread_cache_key));
return reinterpret_cast<ThreadCache*>(
internal::PartitionTlsGet(internal::g_thread_cache_key));
#endif
}

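ThreadCache::Get() above picks between a fast plain thread_local pointer and a portable explicit-TLS-key fallback. A self-contained sketch of the fast path's per-thread semantics (illustrative names; the caches are deliberately leaked to keep the sketch short):

#include <cstdio>
#include <thread>

struct ThreadCache {
  int unused = 0;
};

// Fast path: one slot per thread, resolved by the compiler/runtime.
thread_local ThreadCache* g_thread_cache = nullptr;

ThreadCache* GetOrCreate() {
  if (!g_thread_cache)
    g_thread_cache = new ThreadCache();
  return g_thread_cache;
}

int main() {
  ThreadCache* main_cache = GetOrCreate();
  std::thread other([main_cache] {
    // A second thread sees its own slot, not the main thread's cache.
    std::printf("distinct caches: %d\n", GetOrCreate() != main_cache);
  });
  other.join();
}
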
@ -298,7 +306,7 @@ class BASE_EXPORT ThreadCache {
return buckets_[index].count;
}

PlatformThreadId thread_id() const { return thread_id_; }
base::PlatformThreadId thread_id() const { return thread_id_; }

// Sets the maximum size of allocations that may be cached by the thread
// cache. This applies to all threads. However, the maximum size is bounded by
@ -319,10 +327,11 @@ class BASE_EXPORT ThreadCache {
ThreadCacheLimits::kLargeSizeThreshold;

private:
friend class partition_alloc::internal::tools::ThreadCacheInspector;
friend class tools::HeapDumper;
friend class tools::ThreadCacheInspector;

struct Bucket {
PartitionFreelistEntry* freelist_head = nullptr;
internal::PartitionFreelistEntry* freelist_head = nullptr;
// Want to keep sizeof(Bucket) small, using small types.
uint8_t count = 0;
std::atomic<uint8_t> limit{}; // Can be changed from another thread.
@ -349,7 +358,7 @@ class BASE_EXPORT ThreadCache {
void ResetForTesting();
// Releases the entire freelist starting at |head| to the root.
template <bool crash_on_corruption>
void FreeAfter(PartitionFreelistEntry* head, size_t slot_size);
void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);
static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);

#if BUILDFLAG(IS_NACL)
@ -362,7 +371,7 @@ class BASE_EXPORT ThreadCache {
1;
#endif
static_assert(
kBucketCount < kNumBuckets,
kBucketCount < internal::kNumBuckets,
"Cannot have more cached buckets than what the allocator supports");

// On some architectures, ThreadCache::Get() can be called and return
@ -396,7 +405,7 @@ class BASE_EXPORT ThreadCache {

// Cold data below.
PartitionRoot<>* const root_;
const PlatformThreadId thread_id_;
const base::PlatformThreadId thread_id_;
#if DCHECK_IS_ON()
bool is_in_thread_cache_ = false;
#endif
@ -408,7 +417,7 @@ class BASE_EXPORT ThreadCache {

friend class ThreadCacheRegistry;
friend class PartitionAllocThreadCacheTest;
friend class partition_alloc::internal::tools::ThreadCacheInspector;
friend class tools::ThreadCacheInspector;
FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
MultipleObjectsCachedPerBucket);
@ -434,10 +443,10 @@ class BASE_EXPORT ThreadCache {
ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
size_t bucket_index) {
PA_REENTRANCY_GUARD(is_in_thread_cache_);
INCREMENT_COUNTER(stats_.cache_fill_count);
PA_INCREMENT_COUNTER(stats_.cache_fill_count);

if (UNLIKELY(bucket_index > largest_active_bucket_index_)) {
INCREMENT_COUNTER(stats_.cache_fill_misses);
PA_INCREMENT_COUNTER(stats_.cache_fill_misses);
return false;
}

@ -447,7 +456,7 @@ ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,

PutInBucket(bucket, slot_start);
cached_memory_ += bucket.slot_size;
INCREMENT_COUNTER(stats_.cache_fill_hits);
PA_INCREMENT_COUNTER(stats_.cache_fill_hits);

// Relaxed ordering: we don't care about having an up-to-date or consistent
// value, just want it to not change while we are using it, hence using
@ -472,21 +481,21 @@ ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
#endif

PA_REENTRANCY_GUARD(is_in_thread_cache_);
INCREMENT_COUNTER(stats_.alloc_count);
PA_INCREMENT_COUNTER(stats_.alloc_count);
// Only handle "small" allocations.
if (UNLIKELY(bucket_index > largest_active_bucket_index_)) {
INCREMENT_COUNTER(stats_.alloc_miss_too_large);
INCREMENT_COUNTER(stats_.alloc_misses);
PA_INCREMENT_COUNTER(stats_.alloc_miss_too_large);
PA_INCREMENT_COUNTER(stats_.alloc_misses);
return 0;
}

auto& bucket = buckets_[bucket_index];
if (LIKELY(bucket.freelist_head)) {
INCREMENT_COUNTER(stats_.alloc_hits);
PA_INCREMENT_COUNTER(stats_.alloc_hits);
} else {
PA_DCHECK(bucket.count == 0);
INCREMENT_COUNTER(stats_.alloc_miss_empty);
INCREMENT_COUNTER(stats_.alloc_misses);
PA_INCREMENT_COUNTER(stats_.alloc_miss_empty);
PA_INCREMENT_COUNTER(stats_.alloc_misses);

FillBucket(bucket_index);

@ -530,12 +539,12 @@ ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
// Here, only poison the current cacheline, which we are touching anyway.
// TODO(lizeb): Make sure this does not hurt performance.

// Everything below requires this aligment.
static_assert(kAlignment == 16, "");
// Everything below requires this alignment.
static_assert(internal::kAlignment == 16, "");

#if HAS_BUILTIN(__builtin_assume_aligned)
uintptr_t address = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(
reinterpret_cast<void*>(slot_start), kAlignment));
reinterpret_cast<void*>(slot_start), internal::kAlignment));
#else
uintptr_t address = slot_start;
#endif
@ -544,7 +553,7 @@ ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
// % 16. Its distance to the next cacheline is 64 - ((address & 63) / 16) *
// 16.
static_assert(
kPartitionCachelineSize == 64,
internal::kPartitionCachelineSize == 64,
"The computation below assumes that cache lines are 64 bytes long.");
int distance_to_next_cacheline_in_16_bytes = 4 - ((address >> 4) & 3);
int slot_size_remaining_in_16_bytes =
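To make the arithmetic above concrete: with 16-byte slots and 64-byte cache lines, (address >> 4) & 3 counts how many 16-byte chunks the slot sits past the start of its cache line, so subtracting from 4 yields the distance to the next line in 16-byte units. A worked example with a hypothetical address:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t address = 0x1030;  // hypothetical 16-byte-aligned slot
  // 0x1030 is 3 chunks of 16 bytes past its cache line's start (0x1000),
  // so 4 - 3 = 1 chunk (16 bytes) remains before the next 64-byte line.
  const int distance_to_next_cacheline_in_16_bytes = 4 - ((address >> 4) & 3);
  std::printf("%d\n", distance_to_next_cacheline_in_16_bytes);  // prints 1
}
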
@ -572,21 +581,21 @@ ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
#endif // defined(PA_HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
// defined(PA_HAS_64_BITS_POINTERS)

auto* entry = PartitionFreelistEntry::EmplaceAndInitForThreadCache(
auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
slot_start, bucket.freelist_head);
bucket.freelist_head = entry;
bucket.count++;
}

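PutInBucket() above pushes a freed slot onto the bucket's freelist, storing the link inside the slot's own memory. A self-contained sketch of that push and the matching pop used on the allocation fast path; FreelistEntry below is an illustrative stand-in for PartitionFreelistEntry:

#include <cstdint>
#include <cstdio>
#include <new>

struct FreelistEntry {
  FreelistEntry* next;
};

struct Bucket {
  FreelistEntry* freelist_head = nullptr;
  uint8_t count = 0;
};

// Push: the entry is constructed in the freed slot itself (cf. the
// EmplaceAndInitForThreadCache call above).
void PutInBucket(Bucket& bucket, void* slot_start) {
  auto* entry = new (slot_start) FreelistEntry{bucket.freelist_head};
  bucket.freelist_head = entry;
  bucket.count++;
}

// Pop: hand a cached slot back out, in LIFO order.
void* TakeFromBucket(Bucket& bucket) {
  FreelistEntry* entry = bucket.freelist_head;
  if (!entry)
    return nullptr;
  bucket.freelist_head = entry->next;
  bucket.count--;
  return entry;
}

int main() {
  alignas(16) unsigned char slot_a[16];
  alignas(16) unsigned char slot_b[16];
  Bucket bucket;
  PutInBucket(bucket, slot_a);
  PutInBucket(bucket, slot_b);
  std::printf("cached: %d\n", bucket.count);                    // prints 2
  std::printf("LIFO: %d\n", TakeFromBucket(bucket) == slot_b);  // prints 1
}
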
} // namespace internal
} // namespace base
} // namespace partition_alloc

namespace partition_alloc::internal {
namespace base::internal {

// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::internal::ThreadCache;
using ::partition_alloc::ThreadCache;
using ::partition_alloc::ThreadCacheRegistry;

} // namespace partition_alloc::internal
} // namespace base::internal

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_

46 src/base/android/base_feature_list.cc Normal file
@ -0,0 +1,46 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/base_features.h"
#include "base/android/jni_string.h"
#include "base/base_jni_headers/BaseFeatureList_jni.h"
#include "base/feature_list.h"
#include "base/notreached.h"

using base::android::ConvertJavaStringToUTF8;
using base::android::JavaParamRef;

namespace base::android {

namespace {

// Array of features exposed through the Java ContentFeatureList API. Entries in
// this array may either refer to features defined in the header of this file or
// in other locations in the code base (e.g. content_features.h).
const base::Feature* const kFeaturesExposedToJava[] = {
&features::kCrashBrowserOnChildMismatchIfBrowserChanged,
&features::kCrashBrowserOnAnyChildMismatch,
}; // namespace

const base::Feature* FindFeatureExposedToJava(const std::string& feature_name) {
for (const base::Feature* feature : kFeaturesExposedToJava) {
if (feature->name == feature_name)
return feature;
}
NOTREACHED() << "Queried feature cannot be found in BaseFeatureList: "
<< feature_name;
return nullptr;
}

} // namespace

static jboolean JNI_BaseFeatureList_IsEnabled(
JNIEnv* env,
const JavaParamRef<jstring>& jfeature_name) {
const base::Feature* feature =
FindFeatureExposedToJava(ConvertJavaStringToUTF8(env, jfeature_name));
return base::FeatureList::IsEnabled(*feature);
}

} // namespace base::android

25 src/base/android/base_features.cc Normal file
@ -0,0 +1,25 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/base_features.h"
#include "base/feature_list.h"

namespace base::android::features {

// Alphabetical:

// Crash the browser process if a child process is created which does not match
// the browser process and the browser package appears to have changed since the
// browser process was launched, so that the browser process will be started
// fresh when next used, hopefully resolving the issue.
const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged{
"CrashBrowserOnChildMismatchIfBrowserChanged", FEATURE_DISABLED_BY_DEFAULT};

// Crash the browser process if a child process is created which does not match
// the browser process regardless of whether the browser package appears to have
// changed.
const base::Feature kCrashBrowserOnAnyChildMismatch{
"CrashBrowserOnAnyChildMismatch", FEATURE_DISABLED_BY_DEFAULT};

} // namespace base::android::features

21 src/base/android/base_features.h Normal file
@ -0,0 +1,21 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ANDROID_BASE_FEATURES_H_
#define BASE_ANDROID_BASE_FEATURES_H_

#include "base/feature_list.h"

namespace base::android::features {

// All features in alphabetical order. The features should be documented
// alongside the definition of their values in the .cc file.

// Alphabetical:
extern const base::Feature kCrashBrowserOnChildMismatchIfBrowserChanged;
extern const base::Feature kCrashBrowserOnAnyChildMismatch;

} // namespace base::android::features

#endif // BASE_ANDROID_BASE_FEATURES_H_

@ -80,7 +80,8 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
is_debug_android_(GetIntParam(params, 22)),
is_tv_(GetIntParam(params, 23)),
version_incremental_(StrDupParam(params, 24)),
hardware_(StrDupParam(params, 25)) {}
hardware_(StrDupParam(params, 25)),
is_at_least_t_(GetIntParam(params, 26)) {}

// static
BuildInfo* BuildInfo::GetInstance() {

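The BuildInfo hunks here extend a positional-parameter scheme: values arrive from Java as a string vector indexed by fixed positions, and is_at_least_t_ claims the next free index, 26. A sketch of the indexing; GetIntParam's real body lives elsewhere in build_info.cc, so the helper below is a hypothetical stand-in:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-in: fetch the value at a fixed position, parse as int.
int GetIntParam(const std::vector<std::string>& params, std::size_t index) {
  return std::stoi(params.at(index));
}

int main() {
  std::vector<std::string> params(27, "0");  // indices 0..26
  params[26] = "1";                          // the new is_at_least_t slot
  assert(GetIntParam(params, 26) == 1);
}
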
@ -140,6 +140,8 @@ class BASE_EXPORT BuildInfo {

const char* hardware() const { return hardware_; }

bool is_at_least_t() const { return is_at_least_t_; }

private:
friend struct BuildInfoSingletonTraits;

@ -176,6 +178,7 @@ class BASE_EXPORT BuildInfo {
const bool is_tv_;
const char* const version_incremental_;
const char* const hardware_;
const bool is_at_least_t_;
};

} // namespace android

@ -58,7 +58,8 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
is_debug_android_(false),
is_tv_(false),
version_incremental_(""),
hardware_("") {}
hardware_(""),
is_at_least_t_(false) {}

// static
BuildInfo* BuildInfo::GetInstance() {

@ -82,8 +82,8 @@ java_annotation_processor("jni_processor") {

# Avoids a circular dependency with base:base_java. This is okay because
# no target should ever expect to package an annotation processor.
"//base/android/java/src/org/chromium/base/annotations/CheckDiscard.java",
"//base/android/java/src/org/chromium/base/annotations/MainDex.java",
"//build/android/java/src/org/chromium/base/annotations/CheckDiscard.java",
"//build/android/java/src/org/chromium/base/annotations/MainDex.java",
]

main_class = "org.chromium.jni_generator.JniProcessor"

@ -979,12 +979,17 @@ class JNIFromJavaSource(object):
class HeaderFileGeneratorHelper(object):
"""Include helper methods for header generators."""

def __init__(self, class_name, fully_qualified_class, use_proxy_hash,
split_name):
def __init__(self,
class_name,
fully_qualified_class,
use_proxy_hash,
split_name=None,
enable_jni_multiplexing=False):
self.class_name = class_name
self.fully_qualified_class = fully_qualified_class
self.use_proxy_hash = use_proxy_hash
self.split_name = split_name
self.enable_jni_multiplexing = enable_jni_multiplexing

def GetStubName(self, native):
"""Return the name of the stub function for this native method.
@ -1001,7 +1006,9 @@ class HeaderFileGeneratorHelper(object):
else:
method_name = EscapeClassName(native.proxy_name)
return 'Java_%s_%s' % (EscapeClassName(
ProxyHelpers.GetQualifiedClass(self.use_proxy_hash)), method_name)
ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash
or self.enable_jni_multiplexing)), method_name)

template = Template('Java_${JAVA_NAME}_native${NAME}')

@ -1016,8 +1023,9 @@ class HeaderFileGeneratorHelper(object):
ret = collections.OrderedDict()
for entry in origin:
if isinstance(entry, NativeMethod) and entry.is_proxy:
ret[ProxyHelpers.GetClass(self.use_proxy_hash)] \
= ProxyHelpers.GetQualifiedClass(self.use_proxy_hash)
use_hash = self.use_proxy_hash or self.enable_jni_multiplexing
ret[ProxyHelpers.GetClass(use_hash)] \
= ProxyHelpers.GetQualifiedClass(use_hash)
continue
ret[self.class_name] = self.fully_qualified_class

@ -1050,7 +1058,8 @@ const char kClassPath_${JAVA_CLASS}[] = \
}
# Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass(self.use_proxy_hash):
if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing):
ret += [template.substitute(values)]

class_getter = """\
@ -1081,7 +1090,8 @@ JNI_REGISTRATION_EXPORT std::atomic<jclass> g_${JAVA_CLASS}_clazz(nullptr);
}
# Since all proxy methods use the same class, defining this in every
# header file would result in duplicated extern initializations.
if full_clazz != ProxyHelpers.GetQualifiedClass(self.use_proxy_hash):
if full_clazz != ProxyHelpers.GetQualifiedClass(
self.use_proxy_hash or self.enable_jni_multiplexing):
ret += [template.substitute(values)]

return ''.join(ret)
@ -1101,10 +1111,12 @@ class InlHeaderFileGenerator(object):
self.constant_fields = constant_fields
self.jni_params = jni_params
self.options = options
self.helper = HeaderFileGeneratorHelper(self.class_name,
self.helper = HeaderFileGeneratorHelper(
self.class_name,
fully_qualified_class,
self.options.use_proxy_hash,
self.options.split_name)
split_name=self.options.split_name,
enable_jni_multiplexing=self.options.enable_jni_multiplexing)

def GetContent(self):
"""Returns the content of the JNI binding file."""
@ -1607,6 +1619,9 @@ See SampleForTests.java for more details.
action='store_true',
help='Hashes the native declaration of methods used '
'in @JniNatives interface.')
parser.add_argument('--enable_jni_multiplexing',
action='store_true',
help='Enables JNI multiplexing for Java native methods')
parser.add_argument(
'--split_name',
help='Split name that the Java classes should be loaded from.')

@ -13,6 +13,7 @@ file.

from __future__ import print_function

import collections
import difflib
import inspect
import optparse
@ -59,6 +60,7 @@ class TestOptions(object):
self.enable_profiling = False
self.enable_tracing = False
self.use_proxy_hash = False
self.enable_jni_multiplexing = False
self.always_mangle = False
self.unchecked_exceptions = False
self.split_name = None
@ -69,7 +71,8 @@ class BaseTest(unittest.TestCase):
@staticmethod
def _MergeRegistrationForTests(results,
header_guard='HEADER_GUARD',
namespace='test'):
namespace='test',
enable_jni_multiplexing=False):

results.sort(key=lambda d: d['FULL_CLASS_NAME'])

@ -79,6 +82,27 @@ class BaseTest(unittest.TestCase):

combined_dict['HEADER_GUARD'] = header_guard
combined_dict['NAMESPACE'] = namespace

if enable_jni_multiplexing:
proxy_signatures_list = sorted(
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
signature for signature in proxy_signatures_list)

proxy_native_array_list = sorted(
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split(
'},\n')))
combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join(
p for p in proxy_native_array_list if p != '') + '}'

signature_to_cases = collections.defaultdict(list)
for d in results:
for signature, cases in d['SIGNATURE_TO_CASES'].items():
signature_to_cases[signature].extend(cases)
combined_dict[
'FORWARDING_CALLS'] = jni_registration_generator._AddForwardingCalls(
signature_to_cases, namespace)

return combined_dict

def _JoinScriptDir(self, path):
@ -356,7 +380,7 @@ class TestGenerator(BaseTest):
natives, [], [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator(
h2 = jni_registration_generator.HeaderGenerator('',
'',
'org/chromium/TestJni',
natives,
@ -365,8 +389,8 @@
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])

self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True),
suffix='Registrations')

def testInnerClassNatives(self):
@ -457,7 +481,7 @@ class TestGenerator(BaseTest):
TestOptions())
self.AssertGoldenTextEquals(h.GetContent())

h2 = jni_registration_generator.HeaderGenerator(
h2 = jni_registration_generator.HeaderGenerator('',
'',
'org/chromium/TestJni',
natives,
@ -466,8 +490,8 @@
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])

self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content, use_hash=False, manual_jni_registration=True),
suffix='Registrations')

def testCalledByNatives(self):
@ -1408,6 +1432,7 @@ class ProxyTestGenerator(BaseTest):

jni_params = jni_generator.JniParams(qualified_clazz)
main_dex_header = jni_registration_generator.HeaderGenerator(
'',
'',
qualified_clazz,
natives,
@ -1417,7 +1442,9 @@
content = TestGenerator._MergeRegistrationForTests([main_dex_header])

self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False))
jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True))

other_qualified_clazz = 'test/foo/Bar'
other_natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
@ -1425,6 +1452,7 @@

jni_params = jni_generator.JniParams(other_qualified_clazz)
non_main_dex_header = jni_registration_generator.HeaderGenerator(
'',
'',
other_qualified_clazz,
other_natives,
@ -1436,7 +1464,9 @@
[non_main_dex_header])

self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
jni_registration_generator.CreateFromDict(content,
use_hash=False,
manual_jni_registration=True),
'AndNonMainDex')

def testProxyNatives(self):
@ -1522,17 +1552,25 @@ class ProxyTestGenerator(BaseTest):
h1 = jni_generator.InlHeaderFileGenerator('', qualified_clazz, natives, [],
[], jni_params, TestOptions())
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator(
'', qualified_clazz, natives, jni_params, False, use_proxy_hash=False)
h2 = jni_registration_generator.HeaderGenerator('',
'',
qualified_clazz,
natives,
jni_params,
False,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])

proxy_opts = jni_registration_generator.ProxyOptions()
proxy_opts = jni_registration_generator.ProxyOptions(
manual_jni_registration=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(content, proxy_opts),
suffix='Java')

self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, proxy_opts.use_hash),
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
content,
proxy_opts.use_hash,
manual_jni_registration=proxy_opts.manual_jni_registration),
suffix='Registrations')

def testProxyHashedExample(self):
@ -1651,6 +1689,66 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(golden_natives, _RemoveHashedNames(natives))


class MultiplexTestGenerator(BaseTest):
def testProxyMultiplexGenJni(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True)

proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
golden_file='testProxyMultiplexGenJni.golden')

self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(reg_dict,
proxy_opts,
forwarding=True),
golden_file='testProxyMultiplexGenJni.2.golden')

def testProxyMultiplexNatives(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict = self._MergeRegistrationForTests([reg_dict],
enable_jni_multiplexing=True)

proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(jni_registration_generator.CreateFromDict(
reg_dict,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing),
golden_file='testProxyMultiplexNatives.golden')

def testProxyMultiplexNativesRegistration(self):
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
reg_dict_for_registration = jni_registration_generator._DictForPath(
self._JoinScriptDir(path),
enable_jni_multiplexing=True,
namespace='test')
reg_dict_for_registration = self._MergeRegistrationForTests(
[reg_dict_for_registration], enable_jni_multiplexing=True)

proxy_opts = jni_registration_generator.ProxyOptions(
enable_jni_multiplexing=True)
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(
reg_dict_for_registration,
proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
manual_jni_registration=True),
golden_file='testProxyMultiplexNativesRegistration.golden')


def TouchStamp(stamp_path):
dir_name = os.path.dirname(stamp_path)
if not os.path.isdir(dir_name):

@ -12,6 +12,7 @@ to register all native methods that exist within an application."""
|
||||
import argparse
|
||||
import collections
|
||||
import functools
|
||||
import hashlib
|
||||
import multiprocessing
|
||||
import os
|
||||
import string
|
||||
@ -56,23 +57,16 @@ def _Generate(java_file_paths,
|
||||
header_path: If specified, generates a header file in this location.
|
||||
namespace: If specified, sets the namespace for the generated header file.
|
||||
"""
|
||||
# For JNI multiplexing, a 16-bit prefix is used to identify each individual
|
||||
# java file path. This allows fewer multiplexed functions to resolve multiple
|
||||
# different native functions with the same signature across the JNI boundary
|
||||
# using switch statements. Should not exceed 65536 (2**16) number of paths.
|
||||
assert len(java_file_paths) < 65536
|
||||
java_path_prefix_tuples = [(path, index)
|
||||
for index, path in enumerate(java_file_paths)]
|
||||
# Without multiprocessing, script takes ~13 seconds for chrome_public_apk
|
||||
# on a z620. With multiprocessing, takes ~2 seconds.
|
||||
results = []
|
||||
with multiprocessing.Pool() as pool:
|
||||
for d in pool.imap_unordered(
|
||||
functools.partial(
|
||||
_DictForPathAndPrefix,
|
||||
_DictForPath,
|
||||
use_proxy_hash=proxy_opts.use_hash,
|
||||
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing),
|
||||
java_path_prefix_tuples):
|
||||
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
|
||||
namespace=namespace), java_file_paths):
|
||||
if d:
|
||||
results.append(d)
|
||||
|
||||
@ -82,25 +76,43 @@ def _Generate(java_file_paths,
|
||||
combined_dict = {}
|
||||
for key in MERGEABLE_KEYS:
|
||||
combined_dict[key] = ''.join(d.get(key, '') for d in results)
|
||||
# PROXY_NATIVE_SIGNATURES will have duplicates for JNI multiplexing since
|
||||
# all native methods with similar signatures map to the same proxy.
|
||||
# PROXY_NATIVE_SIGNATURES and PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX will have
|
||||
# duplicates for JNI multiplexing since all native methods with similar
|
||||
# signatures map to the same proxy. Similarly, there may be multiple switch
|
||||
# case entries for the same proxy signatures.
|
||||
if proxy_opts.enable_jni_multiplexing:
|
||||
proxy_signatures_list = sorted(
|
||||
set(combined_dict['PROXY_NATIVE_SIGNATURES'].split('\n')))
|
||||
combined_dict['PROXY_NATIVE_SIGNATURES'] = '\n'.join(
|
||||
signature for signature in proxy_signatures_list)
|
||||
|
||||
proxy_native_array_list = sorted(
|
||||
set(combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'].split('},\n')))
|
||||
combined_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'] = '},\n'.join(
|
||||
p for p in proxy_native_array_list if p != '') + '}'
|
||||
|
||||
signature_to_cases = collections.defaultdict(list)
|
||||
for d in results:
|
||||
for signature, cases in d['SIGNATURE_TO_CASES'].items():
|
||||
signature_to_cases[signature].extend(cases)
|
||||
combined_dict['FORWARDING_CALLS'] = _AddForwardingCalls(
|
||||
signature_to_cases, namespace)

  if header_path:
    combined_dict['HEADER_GUARD'] = \
        os.path.splitext(header_path)[0].replace('/', '_').upper() + '_'
    combined_dict['NAMESPACE'] = namespace
    header_content = CreateFromDict(combined_dict, proxy_opts.use_hash)
    header_content = CreateFromDict(
        combined_dict,
        proxy_opts.use_hash,
        enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
        manual_jni_registration=proxy_opts.manual_jni_registration)
    with build_utils.AtomicOutput(header_path, mode='w') as f:
      f.write(header_content)

  with build_utils.AtomicOutput(srcjar_path) as f:
    with zipfile.ZipFile(f, 'w') as srcjar:
      if proxy_opts.use_hash:
      if proxy_opts.use_hash or proxy_opts.enable_jni_multiplexing:
        # J/N.java
        build_utils.AddToZipHermetic(
            srcjar,
@ -120,20 +132,10 @@ def _Generate(java_file_paths,
            data=CreateProxyJavaFromDict(combined_dict, proxy_opts))


# A wrapper for imap_ordered to call with a tuple.
def _DictForPathAndPrefix(path_prefix_tuple, use_proxy_hash,
                          enable_jni_multiplexing):
  path, switch_prefix = path_prefix_tuple
  return _DictForPath(path,
                      use_proxy_hash=use_proxy_hash,
                      enable_jni_multiplexing=enable_jni_multiplexing,
                      switch_prefix=switch_prefix)


def _DictForPath(path,
                 use_proxy_hash=False,
                 enable_jni_multiplexing=False,
                 switch_prefix=None):
                 namespace=''):
  with open(path) as f:
    contents = jni_generator.RemoveComments(f.read())
  if '@JniIgnoreNatives' in contents:
@ -149,23 +151,68 @@ def _DictForPath(path,
                                          ptr_type='long')
  if len(natives) == 0:
    return None
  namespace = jni_generator.ExtractJNINamespace(contents)
  # The namespace for the content is separate from the namespace for the
  # generated header file.
  content_namespace = jni_generator.ExtractJNINamespace(contents)
  jni_params = jni_generator.JniParams(fully_qualified_class)
  jni_params.ExtractImportsAndInnerClasses(contents)
  is_main_dex = jni_generator.IsMainDexJavaClass(contents)
  header_generator = HeaderGenerator(
      namespace,
      content_namespace,
      fully_qualified_class,
      natives,
      jni_params,
      is_main_dex,
      use_proxy_hash,
      enable_jni_multiplexing=enable_jni_multiplexing,
      switch_prefix=switch_prefix)
      enable_jni_multiplexing=enable_jni_multiplexing)
  return header_generator.Generate()


def _SetProxyRegistrationFields(registration_dict, use_hash):
def _AddForwardingCalls(signature_to_cases, namespace):
  template = string.Template("""
JNI_GENERATOR_EXPORT ${RETURN} Java_${CLASS_NAME}_${PROXY_SIGNATURE}(
    JNIEnv* env,
    jclass jcaller,
    ${PARAMS_IN_STUB}) {
  switch (switch_num) {
    ${CASES}
    default:
      CHECK(false) << "JNI multiplexing function Java_\
${CLASS_NAME}_${PROXY_SIGNATURE} was called with an invalid switch number: "\
 << switch_num;
      return${DEFAULT_RETURN};
  }
}""")

  switch_statements = []
  for signature, cases in sorted(signature_to_cases.items()):
    return_type, params_list = signature
    params_in_stub = _GetJavaToNativeParamsList(params_list)
    switch_statements.append(
        template.substitute({
            'RETURN':
            jni_generator.JavaDataTypeToC(return_type),
            'CLASS_NAME':
            jni_generator.EscapeClassName(
                jni_generator.ProxyHelpers.GetQualifiedClass(True) + namespace),
            'PROXY_SIGNATURE':
            jni_generator.EscapeClassName(
                _GetMultiplexProxyName(return_type, params_list)),
            'PARAMS_IN_STUB':
            params_in_stub,
            'CASES':
            ''.join(cases),
            'DEFAULT_RETURN':
            '' if return_type == 'void' else ' {}',
        }))

  return ''.join(s for s in switch_statements)


def _SetProxyRegistrationFields(registration_dict, use_hash,
                                enable_jni_multiplexing,
                                manual_jni_registration):
  registration_template = string.Template("""\

static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = {
@ -202,17 +249,46 @@ JNI_REGISTRATION_EXPORT bool ${REGISTRATION_NAME}(JNIEnv* env) {
}
""")

  manual_registration = string.Template("""\
// Step 3: Method declarations.

${JNI_NATIVE_METHOD_ARRAY}\
${PROXY_NATIVE_METHOD_ARRAY}\

${JNI_NATIVE_METHOD}
// Step 4: Main dex and non-main dex registration functions.

namespace ${NAMESPACE} {

bool RegisterMainDexNatives(JNIEnv* env) {\
${REGISTER_MAIN_DEX_PROXY_NATIVES}
${REGISTER_MAIN_DEX_NATIVES}
  return true;
}

bool RegisterNonMainDexNatives(JNIEnv* env) {\
${REGISTER_PROXY_NATIVES}
${REGISTER_NON_MAIN_DEX_NATIVES}
  return true;
}

}  // namespace ${NAMESPACE}
""")

  sub_dict = {
      'ESCAPED_PROXY_CLASS':
      jni_generator.EscapeClassName(
          jni_generator.ProxyHelpers.GetQualifiedClass(use_hash)),
          jni_generator.ProxyHelpers.GetQualifiedClass(
              use_hash or enable_jni_multiplexing)),
      'PROXY_CLASS':
      jni_generator.ProxyHelpers.GetQualifiedClass(use_hash),
      jni_generator.ProxyHelpers.GetQualifiedClass(use_hash
                                                   or enable_jni_multiplexing),
      'KMETHODS':
      registration_dict['PROXY_NATIVE_METHOD_ARRAY'],
      'REGISTRATION_NAME':
      jni_generator.GetRegistrationFunctionName(
          jni_generator.ProxyHelpers.GetQualifiedClass(use_hash)),
          jni_generator.ProxyHelpers.GetQualifiedClass(
              use_hash or enable_jni_multiplexing)),
  }

  if registration_dict['PROXY_NATIVE_METHOD_ARRAY']:
@ -236,6 +312,12 @@ JNI_REGISTRATION_EXPORT bool ${REGISTRATION_NAME}(JNIEnv* env) {
  registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration
  registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call

  if manual_jni_registration:
    registration_dict['MANUAL_REGISTRATION'] = manual_registration.substitute(
        registration_dict)
  else:
    registration_dict['MANUAL_REGISTRATION'] = ''


def CreateProxyJavaFromDict(registration_dict, proxy_opts, forwarding=False):
  template = string.Template("""\
@ -255,11 +337,13 @@ ${METHODS}
}
""")

  is_natives_class = not forwarding and proxy_opts.use_hash
  is_natives_class = not forwarding and (proxy_opts.use_hash
                                         or proxy_opts.enable_jni_multiplexing)
  class_name = jni_generator.ProxyHelpers.GetClass(is_natives_class)
  package = jni_generator.ProxyHelpers.GetPackage(is_natives_class)

  if forwarding or not proxy_opts.use_hash:
  if forwarding or not (proxy_opts.use_hash
                        or proxy_opts.enable_jni_multiplexing):
    fields = string.Template("""\
    public static final boolean TESTING_ENABLED = ${TESTING_ENABLED};
    public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK};
@ -283,7 +367,10 @@ ${METHODS}
  })


def CreateFromDict(registration_dict, use_hash):
def CreateFromDict(registration_dict,
                   use_hash,
                   enable_jni_multiplexing=False,
                   manual_jni_registration=False):
  """Returns the content of the header file."""

  template = string.Template("""\
@ -313,54 +400,52 @@ ${CLASS_PATH_DECLARATIONS}
// Step 2: Forward declarations (methods).

${FORWARD_DECLARATIONS}

// Step 3: Method declarations.

${JNI_NATIVE_METHOD_ARRAY}\
${PROXY_NATIVE_METHOD_ARRAY}\

${JNI_NATIVE_METHOD}
// Step 4: Main dex and non-main dex registration functions.

namespace ${NAMESPACE} {

bool RegisterMainDexNatives(JNIEnv* env) {\
${REGISTER_MAIN_DEX_PROXY_NATIVES}
${REGISTER_MAIN_DEX_NATIVES}
  return true;
}

bool RegisterNonMainDexNatives(JNIEnv* env) {\
${REGISTER_PROXY_NATIVES}
${REGISTER_NON_MAIN_DEX_NATIVES}
  return true;
}

}  // namespace ${NAMESPACE}

${FORWARDING_CALLS}
${MANUAL_REGISTRATION}
#endif  // ${HEADER_GUARD}
""")
  _SetProxyRegistrationFields(registration_dict, use_hash)

  _SetProxyRegistrationFields(registration_dict, use_hash,
                              enable_jni_multiplexing, manual_jni_registration)
  if not enable_jni_multiplexing:
    registration_dict['FORWARDING_CALLS'] = ''
  if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
    return ''

  return template.substitute(registration_dict)


def _GetJavaToNativeParamsList(params_list):
  if not params_list:
    return 'jlong switch_num'

  # Parameters are named after their type, with a unique number per parameter
  # type to make sure the names are unique, even within the same types.
  params_type_count = collections.defaultdict(int)
  params_in_stub = []
  for p in params_list:
    params_type_count[p] += 1
    params_in_stub.append(
        '%s %s_param%d' %
        (jni_generator.JavaDataTypeToC(p), p.replace(
            '[]', '_array').lower(), params_type_count[p]))

  return 'jlong switch_num, ' + ', '.join(params_in_stub)
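As an illustration of the stub parameter naming above, a self-contained sketch; JAVA_TO_C is a made-up stand-in for jni_generator.JavaDataTypeToC, which maps Java types to their JNI C equivalents:

    import collections

    JAVA_TO_C = {'int': 'jint', 'String': 'jstring', 'byte[]': 'jbyteArray'}

    def params_list_sketch(params):
      counts = collections.defaultdict(int)
      out = []
      for p in params:
        counts[p] += 1  # per-type counter keeps repeated types distinct
        out.append('%s %s_param%d' %
                   (JAVA_TO_C[p], p.replace('[]', '_array').lower(), counts[p]))
      return 'jlong switch_num, ' + ', '.join(out)

    print(params_list_sketch(['int', 'int', 'String']))
    # jlong switch_num, jint int_param1, jint int_param2, jstring string_param1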


class HeaderGenerator(object):
  """Generates an inline header file for JNI registration."""

  def __init__(self,
               namespace,
               content_namespace,
               fully_qualified_class,
               natives,
               jni_params,
               main_dex,
               use_proxy_hash,
               enable_jni_multiplexing=False,
               switch_prefix=None):
               enable_jni_multiplexing=False):
    self.namespace = namespace
    self.content_namespace = content_namespace
    self.natives = natives
    self.proxy_natives = [n for n in natives if n.is_proxy]
    self.non_proxy_natives = [n for n in natives if not n.is_proxy]
@ -369,12 +454,12 @@ class HeaderGenerator(object):
    self.class_name = self.fully_qualified_class.split('/')[-1]
    self.main_dex = main_dex
    self.helper = jni_generator.HeaderFileGeneratorHelper(
        self.class_name, fully_qualified_class, use_proxy_hash, None)
        self.class_name,
        fully_qualified_class,
        use_proxy_hash,
        enable_jni_multiplexing=enable_jni_multiplexing)
    self.use_proxy_hash = use_proxy_hash
    self.enable_jni_multiplexing = enable_jni_multiplexing
    # Each java file path is assigned a 16-bit integer as a prefix to the
    # switch number to ensure uniqueness across all native methods.
    self.switch_prefix = switch_prefix
    self.registration_dict = None

  def Generate(self):
@ -394,8 +479,9 @@ class HeaderGenerator(object):
        for native in self.proxy_natives))
    if self.enable_jni_multiplexing:
      self._AssignSwitchNumberToNatives()
      self._AddCases()

    if self.use_proxy_hash:
    if self.use_proxy_hash or self.enable_jni_multiplexing:
      self.registration_dict['FORWARDING_PROXY_METHODS'] = ('\n'.join(
          _MakeForwardingProxy(
              native, enable_jni_multiplexing=self.enable_jni_multiplexing)
@ -460,8 +546,8 @@ ${KMETHODS}
""")
    open_namespace = ''
    close_namespace = ''
    if self.namespace:
      parts = self.namespace.split('::')
    if self.content_namespace:
      parts = self.content_namespace.split('::')
      all_namespaces = ['namespace %s {' % ns for ns in parts]
      open_namespace = '\n'.join(all_namespaces) + '\n'
      all_namespaces = ['}  // namespace %s' % ns for ns in parts]
@ -469,8 +555,9 @@ ${KMETHODS}
      close_namespace = '\n'.join(all_namespaces) + '\n\n'

    body = self._SubstituteNativeMethods(template)
    self._SetDictValue('JNI_NATIVE_METHOD_ARRAY', ''.join((open_namespace, body,
                                                           close_namespace)))
    if body:
      self._SetDictValue('JNI_NATIVE_METHOD_ARRAY', ''.join(
          (open_namespace, body, close_namespace)))

  def _GetKMethodsString(self, clazz):
    ret = []
@ -485,27 +572,39 @@ ${KMETHODS}
                'reinterpret_cast<void*>(${STUB_NAME}) },')

    name = 'native' + native.name
    jni_signature = self.jni_params.Signature(native.params, native.return_type)
    stub_name = self.helper.GetStubName(native)

    if native.is_proxy:
      # Literal name of the native method in the class that contains the actual
      # native declaration.
      if self.use_proxy_hash:
      if self.enable_jni_multiplexing:
        return_type, params_list = native.return_and_signature
        class_name = jni_generator.EscapeClassName(
            jni_generator.ProxyHelpers.GetQualifiedClass(True) + self.namespace)
        proxy_signature = jni_generator.EscapeClassName(
            _GetMultiplexProxyName(return_type, params_list))

        name = _GetMultiplexProxyName(return_type, params_list)
        jni_signature = self.jni_params.Signature(
            [jni_generator.Param(datatype='long', name='switch_num')] +
            native.params, native.return_type)
        stub_name = 'Java_' + class_name + '_' + proxy_signature
      elif self.use_proxy_hash:
        name = native.hashed_proxy_name
      else:
        name = native.proxy_name
    values = {
        'NAME':
        name,
        'JNI_SIGNATURE':
        self.jni_params.Signature(native.params, native.return_type),
        'STUB_NAME':
        self.helper.GetStubName(native)
        'NAME': name,
        'JNI_SIGNATURE': jni_signature,
        'STUB_NAME': stub_name
    }
    return template.substitute(values)

  def _AddProxyNativeMethodKStrings(self):
    """Returns KMethodString for wrapped native methods in all_classes """

    if self.main_dex:
    if self.main_dex or self.enable_jni_multiplexing:
      key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'
    else:
      key = 'PROXY_NATIVE_METHOD_ARRAY'
@ -524,13 +623,14 @@ ${KMETHODS}

    for clazz, full_clazz in all_classes.items():
      if not sub_proxy:
        if clazz == jni_generator.ProxyHelpers.GetClass(self.use_proxy_hash):
        if clazz == jni_generator.ProxyHelpers.GetClass(
            self.use_proxy_hash or self.enable_jni_multiplexing):
          continue

      kmethods = self._GetKMethodsString(clazz)
      namespace_str = ''
      if self.namespace:
        namespace_str = self.namespace + '::'
      if self.content_namespace:
        namespace_str = self.content_namespace + '::'
      if kmethods:
        values = {
            'NAMESPACE': namespace_str,
@ -594,39 +694,84 @@ ${NATIVES}\
    return ''

  def _AssignSwitchNumberToNatives(self):
    # The switch number for a native method is a 32-bit integer and indicates
    # which native implementation the method should be dispatched to across
    # the JNI multiplexing boundary.
    signature_to_methods = collections.defaultdict(list)
    # The switch number for a native method is a 64-bit long with the first
    # bit being a sign digit. The signed two's complement is taken when
    # appropriate to make use of negative numbers.
    for native in self.proxy_natives:
      same_signature_methods = signature_to_methods[native.return_and_signature]
      # Should not exceed 65536 (2**16) methods with same proxy signature.
      assert len(same_signature_methods) < 65536
      hashed_long = hashlib.md5(
          native.proxy_name.encode('utf-8')).hexdigest()[:16]
      switch_num = int(hashed_long, 16)
      if (switch_num & 1 << 63):
        switch_num -= (1 << 64)

      native.switch_num = self.switch_prefix * (2**16) + len(
          same_signature_methods)
      same_signature_methods.append(native.proxy_name)
      native.switch_num = str(switch_num)
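A self-contained sketch of the derivation above: the first 16 hex digits (64 bits) of md5(proxy_name) are taken as an unsigned value, then reinterpreted as a signed two's-complement long so the result fits in a Java long:

    import hashlib

    def switch_num_sketch(proxy_name):
      # First 64 bits of the md5 digest, as an unsigned integer.
      switch_num = int(hashlib.md5(proxy_name.encode('utf-8')).hexdigest()[:16], 16)
      # High bit set means the value is negative as a two's-complement long.
      if switch_num & (1 << 63):
        switch_num -= 1 << 64
      return switch_num

    # Any string works for illustration; real inputs are proxy method names.
    assert -2**63 <= switch_num_sketch('org_chromium_example_method') < 2**63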

  def _AddCases(self):
    # Switch cases are grouped together by the same proxy signatures.
    template = string.Template("""
          case ${SWITCH_NUM}:
            return ${STUB_NAME}(env, jcaller${PARAMS});
          """)

    signature_to_cases = collections.defaultdict(list)
    for native in self.proxy_natives:
      signature = native.return_and_signature
      params = _GetParamsListForMultiplex(signature[1], with_types=False)
      values = {
          'SWITCH_NUM': native.switch_num,
          'STUB_NAME': self.helper.GetStubName(native),
          'PARAMS': params,
      }
      signature_to_cases[signature].append(template.substitute(values))

    self.registration_dict['SIGNATURE_TO_CASES'] = signature_to_cases
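Substituting one native into the case template above yields a fragment like the following; the switch number and stub name here are made-up placeholders, not real generator output:

    import string

    case_template = string.Template("""
          case ${SWITCH_NUM}:
            return ${STUB_NAME}(env, jcaller${PARAMS});
          """)
    print(case_template.substitute({
        'SWITCH_NUM': '-1234567890123456789',
        'STUB_NAME': 'Java_J_N_resolve_1for_1int_1I',
        'PARAMS': ', int_param1',
    }))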


def _GetParamsListForMultiplex(params_list):
def _GetParamsListForMultiplex(params_list, with_types):
  if not params_list:
    return 'int switch_num'
    return ''

  # Parameters are named after their type, with a unique number per parameter
  # type to make sure the names are unique, even within the same types.
  params_type_count = collections.defaultdict(int)
  params_with_types = []
  params = []
  for p in params_list:
    params_type_count[p] += 1
    params_with_types.append(
        '%s %s_param%d' %
        (p, p.replace('[]', '_array').lower(), params_type_count[p]))
    param_type = p + ' ' if with_types else ''
    params.append(
        '%s%s_param%d' %
        (param_type, p.replace('[]', '_array').lower(), params_type_count[p]))

  return ', '.join(params_with_types) + ', int switch_num'
  return ', ' + ', '.join(params)
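Rough illustration of the two modes of _GetParamsListForMultiplex, for a hypothetical (String, byte[], byte[]) parameter list:

    #   _GetParamsListForMultiplex(['String', 'byte[]', 'byte[]'], with_types=False)
    #   -> ', string_param1, byte_array_param1, byte_array_param2'
    #
    #   _GetParamsListForMultiplex(['String', 'byte[]', 'byte[]'], with_types=True)
    #   -> ', String string_param1, byte[] byte_array_param1, byte[] byte_array_param2'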


def _GetMultiplexProxyName(return_type):
  return 'resolve_for_' + return_type.replace('[]', '_array').lower()
def _GetMultiplexProxyName(return_type, params_list):
  # Proxy signatures for methods are named after their return type and
  # parameters to ensure uniqueness, even for the same return types.
  params = ''
  if params_list:
    type_convert_dictionary = {
        '[]': 'A',
        'byte': 'B',
        'char': 'C',
        'double': 'D',
        'float': 'F',
        'int': 'I',
        'long': 'J',
        'Class': 'L',
        'Object': 'O',
        'String': 'R',
        'short': 'S',
        'Throwable': 'T',
        'boolean': 'Z',
    }
    # Parameter types could contain multi-dimensional arrays and every
    # instance of [] has to be replaced in the proxy signature name.
    for k, v in type_convert_dictionary.items():
      params_list = [p.replace(k, v) for p in params_list]
    params = '_' + ''.join(p for p in params_list)

  return 'resolve_for_' + return_type.replace('[]', '_array').lower() + params
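Worked examples of the encoding above, for hypothetical signatures:

    #   _GetMultiplexProxyName('int', ['String', 'byte[]'])
    #   -> 'resolve_for_int_RBA'          # String -> R, byte -> B, [] -> A
    #
    #   _GetMultiplexProxyName('void', [])
    #   -> 'resolve_for_void'             # no parameters, so no suffix
    #
    #   _GetMultiplexProxyName('Object[]', ['int', 'int'])
    #   -> 'resolve_for_object_array_II'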


def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):
@ -642,10 +787,11 @@ def _MakeForwardingProxy(proxy_native, enable_jni_multiplexing=False):

  if enable_jni_multiplexing:
    if not param_names:
      param_names = proxy_native.switch_num
      param_names = proxy_native.switch_num + 'L'
    else:
      param_names += ', %s' % proxy_native.switch_num
    proxy_method_name = _GetMultiplexProxyName(proxy_native.return_type)
      param_names = proxy_native.switch_num + 'L, ' + param_names
    return_type, params_list = proxy_native.return_and_signature
    proxy_method_name = _GetMultiplexProxyName(return_type, params_list)
  else:
    proxy_method_name = proxy_native.hashed_proxy_name

@ -683,8 +829,9 @@ def _MakeProxySignature(proxy_native,

    alt_name = None
    return_type, params_list = proxy_native.return_and_signature
    proxy_name = _GetMultiplexProxyName(return_type)
    params_with_types = _GetParamsListForMultiplex(params_list)
    proxy_name = _GetMultiplexProxyName(return_type, params_list)
    params_with_types = 'long switch_num' + _GetParamsListForMultiplex(
        params_list, with_types=True)
  elif use_proxy_hash:
    signature_template = string.Template("""
      // Original name: ${ALT_NAME}""" + native_method_line)
@ -711,6 +858,7 @@ class ProxyOptions:
  def __init__(self, **kwargs):
    self.use_hash = kwargs.get('use_hash', False)
    self.enable_jni_multiplexing = kwargs.get('enable_jni_multiplexing', False)
    self.manual_jni_registration = kwargs.get('manual_jni_registration', False)
    self.enable_mocks = kwargs.get('enable_mocks', False)
    self.require_mocks = kwargs.get('require_mocks', False)
    # Can never require and disable.
@ -766,6 +914,10 @@ def main(argv):
      '--enable_jni_multiplexing',
      action='store_true',
      help='Enables JNI multiplexing for Java native methods')
  arg_parser.add_argument(
      '--manual_jni_registration',
      action='store_true',
      help='Manually do JNI registration - required for crazy linker')
  args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))

  if not args.enable_proxy_mocks and args.require_mocks:
@ -773,10 +925,16 @@ def main(argv):
        'Invalid arguments: --require_mocks without --enable_proxy_mocks. '
        'Cannot require mocks if they are not enabled.')

  if not args.header_path and args.manual_jni_registration:
    arg_parser.error(
        'Invalid arguments: --manual_jni_registration without --header-path. '
        'Cannot manually register JNI if there is no output header file.')

  sources_files = sorted(set(build_utils.ParseGnList(args.sources_files)))
  proxy_opts = ProxyOptions(
      use_hash=args.use_proxy_hash,
      enable_jni_multiplexing=args.enable_jni_multiplexing,
      manual_jni_registration=args.manual_jni_registration,
      require_mocks=args.require_mocks,
      enable_mocks=args.enable_proxy_mocks)

@ -786,8 +944,7 @@ def main(argv):
    java_file_paths.extend(
        p for p in build_utils.ReadSourcesList(f)
        if p.startswith('..') and p not in args.sources_exclusions)
  _Generate(
      java_file_paths,
  _Generate(java_file_paths,
            args.srcjar_path,
            proxy_opts=proxy_opts,
            header_path=args.header_path,

@ -66,7 +66,7 @@

# Never inline classes, methods, or fields with this annotation, but allow
# shrinking and obfuscation.
# Relevant to fields when they are needed to store strong refrences to objects
# Relevant to fields when they are needed to store strong references to objects
# that are held as weak references by native code.
-if @org.chromium.base.annotations.DoNotInline class * {
  *** *(...);
@ -81,6 +81,10 @@
  @org.chromium.base.annotations.DoNotInline <fields>;
}

# Never merge classes horizontally or vertically with this annotation.
# Relevant to classes being used as a key in maps or sets.
-keep,allowaccessmodification,allowobfuscation,allowshrinking @DoNotClassMerge class *

# Keep all CREATOR fields within Parcelable that are kept.
-keepclassmembers class org.chromium.** implements android.os.Parcelable {
  public static *** CREATOR;

@ -32,6 +32,7 @@
#include "base/synchronization/lock.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

@ -36,7 +36,6 @@
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "base/base_export.h"
#include "build/build_config.h"

namespace base {

@ -13,6 +13,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/check.h"
#include "base/synchronization/lock.h"
#include "base/template_util.h"
#include "base/thread_annotations.h"

@ -881,7 +881,7 @@ BanUnconstructedRefCountedReceiver(const Receiver& receiver, Unused&&...) {
// ran fast enough, the newly created instance could be destroyed before `oo`
// makes another reference.
//   Foo::Foo() {
//     base::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, this));
//     base::ThreadPool::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, this));
//   }
//
//   scoped_refptr<Foo> oo = new Foo();
@ -896,7 +896,7 @@ BanUnconstructedRefCountedReceiver(const Receiver& receiver, Unused&&...) {
//   // static
//   scoped_refptr<Foo> Foo::Create() {
//     auto foo = base::WrapRefCounted(new Foo());
//     base::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, foo));
//     base::ThreadPool::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, foo));
//     return foo;
//   }
//

@ -15,6 +15,7 @@
#include "base/bind.h"
#include "base/callback_forward.h"
#include "base/callback_internal.h"
#include "base/check.h"
#include "base/notreached.h"

// -----------------------------------------------------------------------------

@ -16,8 +16,10 @@
#include <utility>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/check.h"

namespace base {

@ -47,7 +47,6 @@

#include <utility>

#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_internal.h"

@ -9,6 +9,7 @@
#include <string>
#include <type_traits>

#include "base/base_export.h"
#include "base/check.h"
#include "base/template_util.h"

@ -12,6 +12,7 @@
#include <utility>

#include "base/as_const.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/containers/vector_buffer.h"
#include "base/ranges/algorithm.h"

@ -10,6 +10,7 @@
#include <type_traits>
#include <utility>

#include "base/check.h"
#include "base/check_op.h"
#include "base/memory/raw_ptr.h"

@ -138,6 +138,7 @@
#include <vector>

#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/containers/stack_container.h"
#include "base/memory/ptr_util.h"

@ -12,6 +12,7 @@
#include <new>
#include <utility>

#include "base/check.h"
#include "base/check_op.h"
#include "base/memory/raw_ptr.h"

@ -14,6 +14,7 @@
#include <type_traits>
#include <utility>

#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/checked_iterators.h"

@ -11,6 +11,7 @@
#include <type_traits>
#include <utility>

#include "base/check.h"
#include "base/check_op.h"
#include "base/containers/util.h"
#include "base/numerics/checked_math.h"

@ -5,6 +5,7 @@
#ifndef BASE_CPU_AFFINITY_POSIX_H_
#define BASE_CPU_AFFINITY_POSIX_H_

#include "base/base_export.h"
#include "base/process/process_handle.h"
#include "base/threading/platform_thread.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

@ -15,6 +15,7 @@
#if BUILDFLAG(IS_IOS)
#include "base/bind.h"
#include "base/ios/scoped_critical_action.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#endif

namespace base {
@ -22,24 +23,37 @@ namespace base {
namespace internal {

#if BUILDFLAG(IS_IOS)
// Returns true if multi-tasking is supported on this iOS device.
bool IsMultiTaskingSupported();

// This class wraps a closure so it can continue to run for a period of time
// when the application goes to the background by using
// |ios::ScopedCriticalAction|.
class CriticalClosure {
class ImmediateCriticalClosure {
 public:
  explicit CriticalClosure(StringPiece task_name, OnceClosure closure);
  CriticalClosure(const CriticalClosure&) = delete;
  CriticalClosure& operator=(const CriticalClosure&) = delete;
  ~CriticalClosure();
  explicit ImmediateCriticalClosure(StringPiece task_name, OnceClosure closure);
  ImmediateCriticalClosure(const ImmediateCriticalClosure&) = delete;
  ImmediateCriticalClosure& operator=(const ImmediateCriticalClosure&) = delete;
  ~ImmediateCriticalClosure();
  void Run();

 private:
  ios::ScopedCriticalAction critical_action_;
  OnceClosure closure_;
};

// This class is identical to ImmediateCriticalClosure, but the critical action
// is started when the action runs, not when the CriticalAction is created.
class PendingCriticalClosure {
 public:
  explicit PendingCriticalClosure(StringPiece task_name, OnceClosure closure);
  PendingCriticalClosure(const PendingCriticalClosure&) = delete;
  PendingCriticalClosure& operator=(const PendingCriticalClosure&) = delete;
  ~PendingCriticalClosure();
  void Run();

 private:
  absl::optional<ios::ScopedCriticalAction> critical_action_;
  std::string task_name_;
  OnceClosure closure_;
};
#endif  // BUILDFLAG(IS_IOS)

}  // namespace internal
@ -47,7 +61,9 @@ class CriticalClosure {
// Returns a closure that will continue to run for a period of time when the
// application goes to the background if possible on platforms where
// applications don't execute while backgrounded, otherwise the original task is
// returned.
// returned. If |is_immediate| is true, the closure will immediately prevent
// background suspension. Otherwise, the closure will wait to request background
// permission until it is run.
//
// Example:
//   file_task_runner_->PostTask(
@ -61,28 +77,39 @@ class CriticalClosure {
// that do not complete in time for suspension.
#if BUILDFLAG(IS_IOS)
inline OnceClosure MakeCriticalClosure(StringPiece task_name,
                                       OnceClosure closure) {
  DCHECK(internal::IsMultiTaskingSupported());
  return base::BindOnce(
      &internal::CriticalClosure::Run,
      Owned(new internal::CriticalClosure(task_name, std::move(closure))));
                                       OnceClosure closure,
                                       bool is_immediate) {
  if (is_immediate) {
    return base::BindOnce(&internal::ImmediateCriticalClosure::Run,
                          Owned(new internal::ImmediateCriticalClosure(
                              task_name, std::move(closure))));
  } else {
    return base::BindOnce(&internal::PendingCriticalClosure::Run,
                          Owned(new internal::PendingCriticalClosure(
                              task_name, std::move(closure))));
  }
}

inline OnceClosure MakeCriticalClosure(const Location& posted_from,
                                       OnceClosure closure) {
  return MakeCriticalClosure(posted_from.ToString(), std::move(closure));
                                       OnceClosure closure,
                                       bool is_immediate) {
  return MakeCriticalClosure(posted_from.ToString(), std::move(closure),
                             is_immediate);
}

#else  // BUILDFLAG(IS_IOS)

inline OnceClosure MakeCriticalClosure(StringPiece task_name,
                                       OnceClosure closure) {
                                       OnceClosure closure,
                                       bool is_immediate) {
  // No-op for platforms where the application does not need to acquire
  // background time for closures to finish when it goes into the background.
  return closure;
}

inline OnceClosure MakeCriticalClosure(const Location& posted_from,
                                       OnceClosure closure) {
                                       OnceClosure closure,
                                       bool is_immediate) {
  return closure;
}